"""Convert mLUKE checkpoint."""

import argparse
import json
import os
from collections import OrderedDict

import torch

from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
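# Example invocation (a sketch only; the file names below are hypothetical and
# depend on where the original Studio Ousia mLUKE release was downloaded):
#
#   python convert_mluke_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path mluke/pytorch_model.bin \
#       --metadata_path mluke/metadata.json \
#       --entity_vocab_path mluke/entity_vocab.jsonl \
#       --pytorch_dump_folder_path converted_mluke \
#       --model_size base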
from __future__ import annotations

import os
import tempfile
import unittest

from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFConvBertForMaskedLM,
        TFConvBertForMultipleChoice,
        TFConvBertForQuestionAnswering,
        TFConvBertForSequenceClassification,
        TFConvBertForTokenClassification,
        TFConvBertModel,
    )


class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)

            check_encoder_attentions_output(outputs)


@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
"""
Project Euler Problem 92: https://projecteuler.net/problem=92

Square digit chains: every starting number eventually arrives at 1 or 89.
How many starting numbers below ten million will arrive at 89?
"""
from __future__ import annotations

DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]


def next_number(number: int) -> int:
    """
    Return the sum of the squares of the digits of ``number``.
    >>> next_number(44)
    32
    >>> next_number(10)
    1
    """
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000

    return sum_of_digits_squared


# There are 2 chains made:
# one ends with 89, with the chain member 58 being the one which, when declared
# first, gives the least number of iterations for all the members to be checked;
# the other ends with 1 and has only one element, 1.
# So 58 and 1 are chosen to be declared at the start.
# Changed dictionary to an array to quicken the solution.
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[0] = True  # 1 arrives at 1
CHAINS[57] = False  # 58 arrives at 89


def chain(number: int) -> bool:
    """Return True if the chain starting at ``number`` arrives at 1, False if it arrives at 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10000000) -> int:
    """Count the starting numbers below ``number`` whose chain arrives at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
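# Worked example (from the Project Euler 92 statement, added for illustration):
# 44 -> 32 -> 13 -> 10 -> 1, so the chain starting at 44 arrives at 1, while
# 85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89 arrives at 89.
# A quick sanity check of next_number:
#
#   assert next_number(44) == 4**2 + 4**2 == 32
#   assert next_number(85) == 8**2 + 5**2 == 89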
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'{solution() = }')
"""Convert ResNet checkpoints from timm."""

import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List

import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor

from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger()


@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")


def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
            " currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )
    args = parser.parse_args()

    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
"""
Project Euler Problem 9: https://projecteuler.net/problem=9

There exists exactly one Pythagorean triplet (a, b, c) for which a + b + c = 1000.
Find the product a * b * c.
"""


def solution() -> int:
    """Return the product a * b * c of the Pythagorean triplet with a + b + c = 1000."""
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
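# Sanity check (added for illustration): the unique triplet is (200, 375, 425),
# since 200 + 375 + 425 == 1000 and 200**2 + 375**2 == 180625 == 425**2,
# so the product returned is 200 * 375 * 425 == 31875000.
#
#   assert solution() == 31875000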
if __name__ == "__main__":
print(f'{solution() = }')
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock

import torch
from accelerate.utils import write_basic_config

from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --seed=42
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --block_size 128
            --per_device_train_batch_size 5
            --per_device_eval_batch_size 5
            --num_train_epochs 2
            --output_dir {tmp_dir}
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --num_train_epochs=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_ner_no_trainer(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))

    @unittest.skip(reason="Fix me @muellerzr")
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --seed=42
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/swag/sample.json
            --validation_file tests/fixtures/tests_samples/swag/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=20
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
            --model_name_or_path sshleifer/student_marian_en_ro_6_1
            --source_lang en
            --target_lang ro
            --train_file tests/fixtures/tests_samples/wmt16/sample.json
            --validation_file tests/fixtures/tests_samples/wmt16/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --num_beams=6
            --learning_rate=3e-3
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --source_lang en_XX
            --target_lang ro_RO
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))

    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
            --dataset_name huggingface/semantic-segmentation-test-sample
            --output_dir {tmp_dir}
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
            --model_name_or_path google/vit-base-patch16-224-in21k
            --dataset_name hf-internal-testing/cats_vs_dogs_sample
            --learning_rate 1e-4
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 1
            --max_train_steps 2
            --train_val_split 0.1
            --seed 42
            --output_dir {tmp_dir}
            --with_tracking
            --checkpointing_steps 1
        """.split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
"""Check if a number is a perfect square."""

import math


def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square using math.sqrt()."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using binary search."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
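# A quick check of both approaches (added for illustration). The math.sqrt
# variant can misbehave for very large integers because of floating-point
# rounding, which is what the integer binary search avoids:
#
#   assert perfect_square(9) and perfect_square_binary_search(9)
#   assert not perfect_square(10) and not perfect_square_binary_search(10)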
if __name__ == "__main__":
import doctest
doctest.testmod()
"""
A twin prime of n is n + 2 when both n and n + 2 are prime.
Examples: (3, 5), (5, 7), (11, 13).
https://en.wikipedia.org/wiki/Twin_prime
"""

from maths.prime_check import is_prime


def twin_prime(number: int) -> int:
    """
    Return number + 2 if number and number + 2 are both prime, otherwise -1.
    >>> twin_prime(3)
    5
    >>> twin_prime(4)
    -1
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""
Project Euler Problem 205: https://projecteuler.net/problem=205

Peter rolls nine four-sided (pyramidal) dice with faces 1-4; Colin rolls six
six-sided (cubic) dice with faces 1-6. What is the probability that Peter's
total beats Colin's?
"""
from __future__ import annotations

from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Return, for every possible total, how many rolls of the dice produce it."""
    max_face_number = sides_number
    max_total_value = max_face_number * dice_number
    totals_frequencies = [0] * (max_total_value + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total_value = sum(dice_numbers)
        totals_frequencies[total_value] += 1

    return totals_frequencies


def solution() -> float:
    """Return the probability that Peter beats Colin, rounded to seven digits."""
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)

    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability
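# For reference (added for illustration): Peter's totals range over 9..36 and
# Colin's over 6..36. For each of Peter's totals, the slice
# colin_totals_frequencies[min_colin_total:peter_total] counts the Colin
# outcomes that are strictly smaller, and the win count is then divided by the
# 4**9 * 6**6 equally likely games.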
if __name__ == "__main__":
print(f'{solution() = }')
"""
Calculating the Hubble parameter H(z): the expansion rate of the universe at
redshift z, given the Hubble constant and the present-day relative densities
of radiation, matter, and dark energy.
"""


def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
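# The function evaluates the Friedmann equation for the Hubble parameter,
#
#   H(z) = H_0 * sqrt(Omega_r (1+z)^4 + Omega_m (1+z)^3 + Omega_k (1+z)^2 + Omega_Lambda)
#
# where the curvature density is Omega_k = 1 - (Omega_m + Omega_r + Omega_Lambda),
# exactly as computed in `e_2` above.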
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
_lowerCAmelCase = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class A ( unittest.TestCase ):
'''simple docstring'''
def a_ (self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def a_ (self ) -> Optional[Any]:
__UpperCamelCase : List[Any] = 1
__UpperCamelCase : Union[str, Any] = 3
__UpperCamelCase : int = (3_2, 3_2)
__UpperCamelCase : str = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_UpperCAmelCase )
return image
@property
def a_ (self ) -> Any:
torch.manual_seed(0 )
__UpperCamelCase : List[Any] = UNetaDConditionModel(
block_out_channels=(3_2, 3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , attention_head_dim=8 , use_linear_projection=_UpperCAmelCase , only_cross_attention=(True, True, False) , num_class_embeds=1_0_0 , )
return model
@property
def a_ (self ) -> Optional[Any]:
torch.manual_seed(0 )
__UpperCamelCase : Any = AutoencoderKL(
block_out_channels=[3_2, 3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def a_ (self ) -> Dict:
torch.manual_seed(0 )
__UpperCamelCase : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="gelu" , projection_dim=5_1_2 , )
return CLIPTextModel(_UpperCAmelCase )
def a_ (self ) -> Tuple:
__UpperCamelCase : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase : Optional[int] = self.dummy_cond_unet_upscale
__UpperCamelCase : Union[str, Any] = DDPMScheduler()
__UpperCamelCase : Tuple = DDIMScheduler(prediction_type="v_prediction" )
__UpperCamelCase : List[Any] = self.dummy_vae
__UpperCamelCase : Optional[Any] = self.dummy_text_encoder
__UpperCamelCase : Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__UpperCamelCase : Any = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
__UpperCamelCase : Tuple = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert("RGB" ).resize((6_4, 6_4) )
# make sure here that pndm scheduler skips prk
__UpperCamelCase : List[str] = StableDiffusionUpscalePipeline(
unet=_UpperCAmelCase , low_res_scheduler=_UpperCAmelCase , scheduler=_UpperCAmelCase , vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , max_noise_level=3_5_0 , )
__UpperCamelCase : Any = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__UpperCamelCase : Optional[int] = "A painting of a squirrel eating a burger"
__UpperCamelCase : Any = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
__UpperCamelCase : Tuple = sd_pipe(
[prompt] , image=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , )
__UpperCamelCase : Dict = output.images
__UpperCamelCase : Tuple = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
__UpperCamelCase : Optional[int] = sd_pipe(
[prompt] , image=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , return_dict=_UpperCAmelCase , )[0]
__UpperCamelCase : Dict = image[0, -3:, -3:, -1]
__UpperCamelCase : Tuple = image_from_tuple[0, -3:, -3:, -1]
__UpperCamelCase : Union[str, Any] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
__UpperCamelCase : List[Any] = np.array([0.3_113, 0.3_910, 0.4_272, 0.4_859, 0.5_061, 0.4_652, 0.5_362, 0.5_715, 0.5_661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def a_ (self ) -> int:
__UpperCamelCase : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase : List[str] = self.dummy_cond_unet_upscale
__UpperCamelCase : Dict = DDPMScheduler()
__UpperCamelCase : Any = DDIMScheduler(prediction_type="v_prediction" )
__UpperCamelCase : List[str] = self.dummy_vae
__UpperCamelCase : Tuple = self.dummy_text_encoder
__UpperCamelCase : Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__UpperCamelCase : List[str] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
__UpperCamelCase : Dict = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert("RGB" ).resize((6_4, 6_4) )
# make sure here that pndm scheduler skips prk
__UpperCamelCase : str = StableDiffusionUpscalePipeline(
unet=_UpperCAmelCase , low_res_scheduler=_UpperCAmelCase , scheduler=_UpperCAmelCase , vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , max_noise_level=3_5_0 , )
__UpperCamelCase : List[str] = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__UpperCamelCase : str = "A painting of a squirrel eating a burger"
__UpperCamelCase : Optional[int] = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , )
__UpperCamelCase : Optional[int] = output.images
assert image.shape[0] == 2
__UpperCamelCase : int = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
__UpperCamelCase : Any = sd_pipe(
[prompt] , image=_UpperCAmelCase , generator=_UpperCAmelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , )
__UpperCamelCase : Tuple = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def a_ (self ) -> Optional[int]:
__UpperCamelCase : Union[str, Any] = self.dummy_cond_unet_upscale
__UpperCamelCase : Optional[Any] = DDPMScheduler()
__UpperCamelCase : Union[str, Any] = DDIMScheduler(prediction_type="v_prediction" )
__UpperCamelCase : int = self.dummy_vae
__UpperCamelCase : List[str] = self.dummy_text_encoder
__UpperCamelCase : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__UpperCamelCase : Optional[int] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
__UpperCamelCase : List[str] = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert("RGB" ).resize((6_4, 6_4) )
# put models in fp16, except vae as it overflows in fp16
__UpperCamelCase : List[Any] = unet.half()
__UpperCamelCase : List[str] = text_encoder.half()
# make sure here that pndm scheduler skips prk
__UpperCamelCase : int = StableDiffusionUpscalePipeline(
unet=_UpperCAmelCase , low_res_scheduler=_UpperCAmelCase , scheduler=_UpperCAmelCase , vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , max_noise_level=3_5_0 , )
__UpperCamelCase : Dict = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__UpperCamelCase : str = "A painting of a squirrel eating a burger"
__UpperCamelCase : Optional[int] = torch.manual_seed(0 )
__UpperCamelCase : List[str] = sd_pipe(
[prompt] , image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type="np" , ).images
__UpperCamelCase : int = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
'''simple docstring'''
def a_ (self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ (self ) -> Optional[Any]:
__UpperCamelCase : Optional[int] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
__UpperCamelCase : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat.npy" )
__UpperCamelCase : List[str] = "stabilityai/stable-diffusion-x4-upscaler"
__UpperCamelCase : int = StableDiffusionUpscalePipeline.from_pretrained(_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
__UpperCamelCase : List[Any] = "a cat sitting on a park bench"
__UpperCamelCase : Union[str, Any] = torch.manual_seed(0 )
__UpperCamelCase : Tuple = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , generator=_UpperCAmelCase , output_type="np" , )
__UpperCamelCase : List[str] = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def a_ (self ) -> List[Any]:
__UpperCamelCase : Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
__UpperCamelCase : Optional[int] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat_fp16.npy" )
__UpperCamelCase : str = "stabilityai/stable-diffusion-x4-upscaler"
__UpperCamelCase : Optional[int] = StableDiffusionUpscalePipeline.from_pretrained(
_UpperCAmelCase , torch_dtype=torch.float16 , )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
__UpperCamelCase : Tuple = "a cat sitting on a park bench"
__UpperCamelCase : List[Any] = torch.manual_seed(0 )
__UpperCamelCase : Any = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , generator=_UpperCAmelCase , output_type="np" , )
__UpperCamelCase : Union[str, Any] = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def a_ (self ) -> str:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__UpperCamelCase : str = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
__UpperCamelCase : int = "stabilityai/stable-diffusion-x4-upscaler"
__UpperCamelCase : Optional[Any] = StableDiffusionUpscalePipeline.from_pretrained(
_UpperCAmelCase , torch_dtype=torch.float16 , )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__UpperCamelCase : Optional[int] = "a cat sitting on a park bench"
__UpperCamelCase : Tuple = torch.manual_seed(0 )
__UpperCamelCase : Optional[Any] = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=5 , output_type="np" , )
__UpperCamelCase : Tuple = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 1_0**9
| 298
|
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
_lowerCAmelCase = '''src/transformers'''
_lowerCAmelCase = '''docs/source/en/tasks'''
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
with open(snake_case__ , "r" , encoding="utf-8" , newline="\n" ) as f:
__UpperCamelCase : str = f.readlines()
# Find the start prompt.
__UpperCamelCase : Dict = 0
while not lines[start_index].startswith(snake_case__ ):
start_index += 1
start_index += 1
__UpperCamelCase : Dict = start_index
while not lines[end_index].startswith(snake_case__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
_lowerCAmelCase = direct_transformers_import(TRANSFORMERS_PATH)
_lowerCAmelCase = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
_lowerCAmelCase = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def __lowerCAmelCase ( snake_case__ ):
__UpperCamelCase : Optional[Any] = TASK_GUIDE_TO_MODELS[task_guide]
__UpperCamelCase : str = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(snake_case__ , set() )
__UpperCamelCase : Union[str, Any] = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_mapping_names or code in special_model_types)
}
return ", ".join([F"[{name}](../model_doc/{code})" for code, name in model_names.items()] ) + "\n"
def __lowerCAmelCase ( snake_case__ , snake_case__=False ):
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Union[str, Any] = _find_text_in_file(
filename=os.path.join(snake_case__ , snake_case__ ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , )
__UpperCamelCase : List[str] = get_model_list_for_task(snake_case__ )
if current_list != new_list:
if overwrite:
with open(os.path.join(snake_case__ , snake_case__ ) , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
" to fix this." )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
_lowerCAmelCase = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 298
| 1
|
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
return params[F"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__="attention" ):
__UpperCamelCase : List[str] = np.ascontiguousarray(params[F"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :] )
__UpperCamelCase : List[str] = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
__UpperCamelCase : List[str] = np.ascontiguousarray(params[F"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :] )
__UpperCamelCase : Tuple = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
__UpperCamelCase : Union[str, Any] = np.ascontiguousarray(params[F"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :] )
__UpperCamelCase : int = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
__UpperCamelCase : Dict = np.ascontiguousarray(params[F"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :] )
__UpperCamelCase : Dict = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ):
if split_mlp_wi:
__UpperCamelCase : Dict = params[F"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
__UpperCamelCase : str = params[F"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
__UpperCamelCase : Union[str, Any] = (wi_a, wi_a)
else:
__UpperCamelCase : Any = params[F"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
__UpperCamelCase : Union[str, Any] = params[F"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
return wi, wo
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
return params[F"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def __lowerCAmelCase ( snake_case__ , *, snake_case__ , snake_case__ , snake_case__ = False ):
__UpperCamelCase : Optional[Any] = traverse_util.flatten_dict(variables["target"] )
__UpperCamelCase : List[str] = {"/".join(snake_case__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
__UpperCamelCase : Dict = "encoder/encoder/mlp/wi_0/kernel" in old
print("Split MLP:" , snake_case__ )
__UpperCamelCase : Union[str, Any] = collections.OrderedDict()
# Shared embeddings.
__UpperCamelCase : Tuple = old["token_embedder/embedding"]
# Encoder.
for i in range(snake_case__ ):
# Block i, layer 0 (Self Attention).
__UpperCamelCase : List[Any] = tax_layer_norm_lookup(snake_case__ , snake_case__ , "encoder" , "pre_attention_layer_norm" )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : str = tax_attention_lookup(snake_case__ , snake_case__ , "encoder" , "attention" )
__UpperCamelCase : Dict = layer_norm
__UpperCamelCase : List[str] = k.T
__UpperCamelCase : List[Any] = o.T
__UpperCamelCase : str = q.T
__UpperCamelCase : Optional[int] = v.T
# Block i, layer 1 (MLP).
__UpperCamelCase : Optional[int] = tax_layer_norm_lookup(snake_case__ , snake_case__ , "encoder" , "pre_mlp_layer_norm" )
__UpperCamelCase , __UpperCamelCase : str = tax_mlp_lookup(snake_case__ , snake_case__ , "encoder" , snake_case__ )
__UpperCamelCase : Union[str, Any] = layer_norm
if split_mlp_wi:
__UpperCamelCase : List[str] = wi[0].T
__UpperCamelCase : Optional[int] = wi[1].T
else:
__UpperCamelCase : Optional[Any] = wi.T
__UpperCamelCase : Dict = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
__UpperCamelCase : Dict = tax_relpos_bias_lookup(
snake_case__ , snake_case__ , "encoder" ).T
__UpperCamelCase : str = old["encoder/encoder_norm/scale"]
if not scalable_attention:
__UpperCamelCase : Optional[int] = tax_relpos_bias_lookup(
snake_case__ , 0 , "encoder" ).T
__UpperCamelCase : Optional[Any] = tax_relpos_bias_lookup(
snake_case__ , 0 , "decoder" ).T
if not is_encoder_only:
# Decoder.
for i in range(snake_case__ ):
# Block i, layer 0 (Self Attention).
__UpperCamelCase : List[Any] = tax_layer_norm_lookup(snake_case__ , snake_case__ , "decoder" , "pre_self_attention_layer_norm" )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Tuple = tax_attention_lookup(snake_case__ , snake_case__ , "decoder" , "self_attention" )
__UpperCamelCase : Tuple = layer_norm
__UpperCamelCase : Dict = k.T
__UpperCamelCase : List[Any] = o.T
__UpperCamelCase : int = q.T
__UpperCamelCase : Tuple = v.T
# Block i, layer 1 (Cross Attention).
__UpperCamelCase : Optional[Any] = tax_layer_norm_lookup(snake_case__ , snake_case__ , "decoder" , "pre_cross_attention_layer_norm" )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : List[str] = tax_attention_lookup(snake_case__ , snake_case__ , "decoder" , "encoder_decoder_attention" )
__UpperCamelCase : Optional[int] = layer_norm
__UpperCamelCase : Union[str, Any] = k.T
__UpperCamelCase : Optional[Any] = o.T
__UpperCamelCase : Tuple = q.T
__UpperCamelCase : Union[str, Any] = v.T
# Block i, layer 2 (MLP).
__UpperCamelCase : Dict = tax_layer_norm_lookup(snake_case__ , snake_case__ , "decoder" , "pre_mlp_layer_norm" )
__UpperCamelCase , __UpperCamelCase : List[Any] = tax_mlp_lookup(snake_case__ , snake_case__ , "decoder" , snake_case__ )
__UpperCamelCase : Dict = layer_norm
if split_mlp_wi:
__UpperCamelCase : List[str] = wi[0].T
__UpperCamelCase : Optional[int] = wi[1].T
else:
__UpperCamelCase : Union[str, Any] = wi.T
__UpperCamelCase : str = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
__UpperCamelCase : List[Any] = tax_relpos_bias_lookup(snake_case__ , snake_case__ , "decoder" ).T
__UpperCamelCase : int = old["decoder/decoder_norm/scale"]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
__UpperCamelCase : int = old["decoder/logits_dense/kernel"].T
return new
def __lowerCAmelCase ( snake_case__ , snake_case__ ):
__UpperCamelCase : Optional[int] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
__UpperCamelCase : List[Any] = state_dict["shared.weight"]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
__UpperCamelCase : Optional[int] = state_dict["shared.weight"]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head." )
__UpperCamelCase : Dict = state_dict["shared.weight"]
return state_dict
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
__UpperCamelCase : List[Any] = checkpoints.load_t5x_checkpoint(snake_case__ )
__UpperCamelCase : Any = convert_tax_to_pytorch(
snake_case__ , num_layers=config.num_layers , is_encoder_only=snake_case__ , scalable_attention=snake_case__ )
__UpperCamelCase : Any = make_state_dict(snake_case__ , snake_case__ )
model.load_state_dict(snake_case__ , strict=snake_case__ )
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = False , snake_case__ = False , ):
__UpperCamelCase : Union[str, Any] = MT5Config.from_json_file(snake_case__ )
print(F"Building PyTorch model from configuration: {config}" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
__UpperCamelCase : Optional[int] = UMT5EncoderModel(snake_case__ )
else:
__UpperCamelCase : Any = UMT5ForConditionalGeneration(snake_case__ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(snake_case__ )
# Verify that we can load the checkpoint.
model.from_pretrained(snake_case__ )
print("Done" )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Whether the model is an encoder-only model.''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scalable attention (UMT5 models).''',
default=False,
)
_lowerCAmelCase = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 298
|
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A = ["image_processor", "tokenizer"]
A = "OwlViTImageProcessor"
A = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__(self , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase ) -> str:
__UpperCamelCase : Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , _UpperCAmelCase , )
__UpperCamelCase : str = kwargs.pop("feature_extractor" )
__UpperCamelCase : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__(self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="max_length" , _UpperCAmelCase="np" , **_UpperCAmelCase ) -> str:
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) or (isinstance(_UpperCAmelCase , _UpperCAmelCase ) and not isinstance(text[0] , _UpperCAmelCase )):
__UpperCamelCase : Tuple = [self.tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )]
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(text[0] , _UpperCAmelCase ):
__UpperCamelCase : List[str] = []
# Maximum number of queries across batch
__UpperCamelCase : List[str] = max([len(_UpperCAmelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(_UpperCAmelCase ) != max_num_queries:
__UpperCamelCase : Any = t + [" "] * (max_num_queries - len(_UpperCAmelCase ))
__UpperCamelCase : int = self.tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
encodings.append(_UpperCAmelCase )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
__UpperCamelCase : List[str] = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
__UpperCamelCase : int = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__UpperCamelCase : Tuple = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
__UpperCamelCase : Optional[Any] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__UpperCamelCase : Any = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
__UpperCamelCase : List[Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__UpperCamelCase : Any = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
__UpperCamelCase : Optional[Any] = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
__UpperCamelCase : Optional[Any] = BatchEncoding()
__UpperCamelCase : Union[str, Any] = input_ids
__UpperCamelCase : List[str] = attention_mask
if query_images is not None:
__UpperCamelCase : str = BatchEncoding()
__UpperCamelCase : Any = self.image_processor(
_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ).pixel_values
__UpperCamelCase : List[Any] = query_pixel_values
if images is not None:
__UpperCamelCase : Dict = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if text is not None and images is not None:
__UpperCamelCase : Optional[Any] = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__UpperCamelCase : Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]:
return self.image_processor.post_process(*_UpperCAmelCase , **_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> List[str]:
return self.image_processor.post_process_object_detection(*_UpperCAmelCase , **_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]:
return self.image_processor.post_process_image_guided_detection(*_UpperCAmelCase , **_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> int:
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def a_ (self ) -> Tuple:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _UpperCAmelCase , )
return self.image_processor_class
@property
def a_ (self ) -> Union[str, Any]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _UpperCAmelCase , )
return self.image_processor
| 298
| 1
|
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def __lowerCAmelCase ( snake_case__ , snake_case__ ):
__UpperCamelCase : List[Any] = {
"attention_cell": "multi_head",
"num_layers": 4,
"units": 1_024,
"hidden_size": 768,
"max_length": 512,
"num_heads": 8,
"scaled": True,
"dropout": 0.1,
"use_residual": True,
"embed_size": 1_024,
"embed_dropout": 0.1,
"word_embed": None,
"layer_norm_eps": 1E-5,
"token_type_vocab_size": 2,
}
__UpperCamelCase : Optional[int] = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
__UpperCamelCase : Any = BERTEncoder(
attention_cell=predefined_args["attention_cell"] , num_layers=predefined_args["num_layers"] , units=predefined_args["units"] , hidden_size=predefined_args["hidden_size"] , max_length=predefined_args["max_length"] , num_heads=predefined_args["num_heads"] , scaled=predefined_args["scaled"] , dropout=predefined_args["dropout"] , output_attention=snake_case__ , output_all_encodings=snake_case__ , use_residual=predefined_args["use_residual"] , activation=predefined_args.get("activation" , "gelu" ) , layer_norm_eps=predefined_args.get("layer_norm_eps" , snake_case__ ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
__UpperCamelCase : str = "openwebtext_ccnews_stories_books_cased"
# Specify download folder to Gluonnlp's vocab
__UpperCamelCase : Tuple = os.path.join(get_home_dir() , "models" )
__UpperCamelCase : Union[str, Any] = _load_vocab(snake_case__ , snake_case__ , snake_case__ , cls=snake_case__ )
__UpperCamelCase : Union[str, Any] = nlp.model.BERTModel(
snake_case__ , len(snake_case__ ) , units=predefined_args["units"] , embed_size=predefined_args["embed_size"] , embed_dropout=predefined_args["embed_dropout"] , word_embed=predefined_args["word_embed"] , use_pooler=snake_case__ , use_token_type_embed=snake_case__ , token_type_vocab_size=predefined_args["token_type_vocab_size"] , use_classifier=snake_case__ , use_decoder=snake_case__ , )
original_bort.load_parameters(snake_case__ , cast_dtype=snake_case__ , ignore_extra=snake_case__ )
__UpperCamelCase : int = original_bort._collect_params_with_prefix()
# Build our config 🤗
__UpperCamelCase : Any = {
"architectures": ["BertForMaskedLM"],
"attention_probs_dropout_prob": predefined_args["dropout"],
"hidden_act": "gelu",
"hidden_dropout_prob": predefined_args["dropout"],
"hidden_size": predefined_args["embed_size"],
"initializer_range": 0.02,
"intermediate_size": predefined_args["hidden_size"],
"layer_norm_eps": predefined_args["layer_norm_eps"],
"max_position_embeddings": predefined_args["max_length"],
"model_type": "bort",
"num_attention_heads": predefined_args["num_heads"],
"num_hidden_layers": predefined_args["num_layers"],
"pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
"type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
"vocab_size": len(snake_case__ ),
}
__UpperCamelCase : List[str] = BertConfig.from_dict(snake_case__ )
__UpperCamelCase : str = BertForMaskedLM(snake_case__ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(snake_case__ ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(snake_case__ , snake_case__ ):
__UpperCamelCase : Any = hf_param.shape
__UpperCamelCase : List[Any] = to_torch(params[gluon_param] )
__UpperCamelCase : Union[str, Any] = gluon_param.shape
assert (
shape_hf == shape_gluon
), F"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"
return gluon_param
__UpperCamelCase : Tuple = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , "word_embed.0.weight" )
__UpperCamelCase : str = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , "encoder.position_weight" )
__UpperCamelCase : Optional[int] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , "encoder.layer_norm.beta" )
__UpperCamelCase : str = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , "encoder.layer_norm.gamma" )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
__UpperCamelCase : Any = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
__UpperCamelCase : BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
__UpperCamelCase : BertSelfAttention = layer.attention.self
__UpperCamelCase : int = check_and_map_params(
self_attn.key.bias.data , F"encoder.transformer_cells.{i}.attention_cell.proj_key.bias" )
__UpperCamelCase : List[str] = check_and_map_params(
self_attn.key.weight.data , F"encoder.transformer_cells.{i}.attention_cell.proj_key.weight" )
__UpperCamelCase : str = check_and_map_params(
self_attn.query.bias.data , F"encoder.transformer_cells.{i}.attention_cell.proj_query.bias" )
__UpperCamelCase : List[Any] = check_and_map_params(
self_attn.query.weight.data , F"encoder.transformer_cells.{i}.attention_cell.proj_query.weight" )
__UpperCamelCase : List[str] = check_and_map_params(
self_attn.value.bias.data , F"encoder.transformer_cells.{i}.attention_cell.proj_value.bias" )
__UpperCamelCase : Tuple = check_and_map_params(
self_attn.value.weight.data , F"encoder.transformer_cells.{i}.attention_cell.proj_value.weight" )
# self attention output
__UpperCamelCase : BertSelfOutput = layer.attention.output
__UpperCamelCase : List[Any] = check_and_map_params(
self_output.dense.bias , F"encoder.transformer_cells.{i}.proj.bias" )
__UpperCamelCase : List[Any] = check_and_map_params(
self_output.dense.weight , F"encoder.transformer_cells.{i}.proj.weight" )
__UpperCamelCase : List[Any] = check_and_map_params(
self_output.LayerNorm.bias , F"encoder.transformer_cells.{i}.layer_norm.beta" )
__UpperCamelCase : Optional[int] = check_and_map_params(
self_output.LayerNorm.weight , F"encoder.transformer_cells.{i}.layer_norm.gamma" )
# intermediate
__UpperCamelCase : BertIntermediate = layer.intermediate
__UpperCamelCase : Dict = check_and_map_params(
intermediate.dense.bias , F"encoder.transformer_cells.{i}.ffn.ffn_1.bias" )
__UpperCamelCase : List[Any] = check_and_map_params(
intermediate.dense.weight , F"encoder.transformer_cells.{i}.ffn.ffn_1.weight" )
# output
__UpperCamelCase : BertOutput = layer.output
__UpperCamelCase : Dict = check_and_map_params(
bert_output.dense.bias , F"encoder.transformer_cells.{i}.ffn.ffn_2.bias" )
__UpperCamelCase : Union[str, Any] = check_and_map_params(
bert_output.dense.weight , F"encoder.transformer_cells.{i}.ffn.ffn_2.weight" )
__UpperCamelCase : List[str] = check_and_map_params(
bert_output.LayerNorm.bias , F"encoder.transformer_cells.{i}.ffn.layer_norm.beta" )
__UpperCamelCase : int = check_and_map_params(
bert_output.LayerNorm.weight , F"encoder.transformer_cells.{i}.ffn.layer_norm.gamma" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
__UpperCamelCase : Any = RobertaTokenizer.from_pretrained("roberta-base" )
__UpperCamelCase : int = tokenizer.encode_plus(snake_case__ )["input_ids"]
# Get gluon output
__UpperCamelCase : Dict = mx.nd.array([input_ids] )
__UpperCamelCase : Any = original_bort(inputs=snake_case__ , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(snake_case__ )
__UpperCamelCase : Optional[Any] = BertModel.from_pretrained(snake_case__ )
hf_bort_model.eval()
__UpperCamelCase : str = tokenizer.encode_plus(snake_case__ , return_tensors="pt" )
__UpperCamelCase : Dict = hf_bort_model(**snake_case__ )[0]
__UpperCamelCase : List[Any] = output_gluon[0].asnumpy()
__UpperCamelCase : Optional[int] = output_hf[0].detach().numpy()
__UpperCamelCase : Dict = np.max(np.abs(hf_layer - gluon_layer ) ).item()
__UpperCamelCase : List[Any] = np.allclose(snake_case__ , snake_case__ , atol=1E-3 )
if success:
print("✔️ Both model do output the same tensors" )
else:
print("❌ Both model do **NOT** output the same tensors" )
print("Absolute difference is:" , snake_case__ )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowerCAmelCase = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 298
|
'''simple docstring'''
def __lowerCAmelCase ( snake_case__ ):
return "".join([hex(snake_case__ )[2:].zfill(2 ).upper() for byte in list(snake_case__ )] )
def __lowerCAmelCase ( snake_case__ ):
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(snake_case__ ) % 2) != 0:
raise ValueError(
"Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(snake_case__ ) <= set("0123456789ABCDEF" ):
raise ValueError(
"Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(snake_case__ ) , 2 ) )
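# Usage sketch (illustrative only; the obfuscated function names above are
# assumed here to be base16_encode and base16_decode):
# >>> base16_encode(b"HELLO")
# '48454C4C4F'
# >>> base16_decode("48454C4C4F")
# b'HELLO'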
if __name__ == "__main__":
import doctest
doctest.testmod()
| 298
| 1
|
'''simple docstring'''
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def __lowerCAmelCase ( snake_case__ , snake_case__ ):
__UpperCamelCase : Optional[Any] = args.log_outputs
__UpperCamelCase : Optional[Any] = "_".join(args.dataset.split("/" ) + [args.config, args.split] )
# load metric
__UpperCamelCase : str = load_metric("wer" )
__UpperCamelCase : List[Any] = load_metric("cer" )
# compute metrics
__UpperCamelCase : Dict = wer.compute(references=result["target"] , predictions=result["prediction"] )
__UpperCamelCase : Optional[Any] = cer.compute(references=result["target"] , predictions=result["prediction"] )
# print & log results
__UpperCamelCase : Dict = F"WER: {wer_result}\nCER: {cer_result}"
print(snake_case__ )
with open(F"{dataset_id}_eval_results.txt" , "w" ) as f:
f.write(snake_case__ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
__UpperCamelCase : Tuple = F"log_{dataset_id}_predictions.txt"
__UpperCamelCase : int = F"log_{dataset_id}_targets.txt"
with open(snake_case__ , "w" ) as p, open(snake_case__ , "w" ) as t:
# mapping function to write output
def write_to_file(snake_case__ , snake_case__ ):
p.write(F"{i}" + "\n" )
p.write(batch["prediction"] + "\n" )
t.write(F"{i}" + "\n" )
t.write(batch["target"] + "\n" )
result.map(snake_case__ , with_indices=snake_case__ )
def __lowerCAmelCase ( snake_case__ ):
__UpperCamelCase : Dict = "[,?.!\-\;\:\"“%‘”�—’…–]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
__UpperCamelCase : Union[str, Any] = re.sub(snake_case__ , "" , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
__UpperCamelCase : List[str] = ["\n\n", "\n", " ", " "]
for t in token_sequences_to_ignore:
__UpperCamelCase : int = " ".join(text.split(snake_case__ ) )
return text
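# Example of the normalization above (illustrative): "Hello, World!" is
# lowercased and stripped of the ignored punctuation, yielding "hello world";
# the loop then collapses the listed newline/space sequences into single spaces.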
def __lowerCAmelCase ( snake_case__ ):
# load dataset
__UpperCamelCase : Union[str, Any] = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case__ )
# for testing: only process the first few examples as a sanity check, e.g.
# dataset = dataset.select(range(10))
# load processor
__UpperCamelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(args.model_id )
__UpperCamelCase : List[str] = feature_extractor.sampling_rate
# resample audio
__UpperCamelCase : List[Any] = dataset.cast_column("audio" , Audio(sampling_rate=snake_case__ ) )
# load eval pipeline
if args.device is None:
__UpperCamelCase : Union[str, Any] = 0 if torch.cuda.is_available() else -1
__UpperCamelCase : Optional[Any] = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(snake_case__ ):
__UpperCamelCase : Union[str, Any] = asr(
batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
__UpperCamelCase : int = prediction["text"]
__UpperCamelCase : Tuple = normalize_text(batch["sentence"] )
return batch
# run inference on all examples
__UpperCamelCase : str = dataset.map(snake_case__ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(snake_case__ , snake_case__ )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
)
parser.add_argument(
'''--dataset''',
type=str,
required=True,
help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
)
parser.add_argument(
'''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
)
parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
parser.add_argument(
'''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds for chunked inference. Defaults to None (no chunking).'''
)
parser.add_argument(
'''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks in seconds. Defaults to None (pipeline default).'''
)
parser.add_argument(
'''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
)
parser.add_argument(
'''--device''',
type=int,
default=None,
help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
)
_lowerCAmelCase = parser.parse_args()
main(args)
| 298
|
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
_lowerCAmelCase = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
_lowerCAmelCase = logging.getLogger()
def __lowerCAmelCase ( ):
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument("-f" )
__UpperCamelCase : Optional[Any] = parser.parse_args()
return args.f
def __lowerCAmelCase ( snake_case__ , snake_case__="eval" ):
__UpperCamelCase : List[str] = os.path.join(snake_case__ , F"{split}_results.json" )
if os.path.exists(snake_case__ ):
with open(snake_case__ , "r" ) as f:
return json.load(snake_case__ )
raise ValueError(F"can't find {path}" )
_lowerCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def a_ (self ) -> str:
__UpperCamelCase : Any = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[str] = f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_flax_glue.main()
__UpperCamelCase : int = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
@slow
def a_ (self ) -> Tuple:
__UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Any = f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_clm_flax.main()
__UpperCamelCase : Optional[int] = get_results(_UpperCAmelCase )
self.assertLess(result["eval_perplexity"] , 1_0_0 )
@slow
def a_ (self ) -> str:
__UpperCamelCase : Any = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Tuple = f"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_summarization_flax.main()
__UpperCamelCase : Tuple = get_results(_UpperCAmelCase , split="test" )
self.assertGreaterEqual(result["test_rouge1"] , 1_0 )
self.assertGreaterEqual(result["test_rouge2"] , 2 )
self.assertGreaterEqual(result["test_rougeL"] , 7 )
self.assertGreaterEqual(result["test_rougeLsum"] , 7 )
@slow
def a_ (self ) -> int:
__UpperCamelCase : int = self.get_auto_remove_tmp_dir()
__UpperCamelCase : str = f"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_mlm_flax.main()
__UpperCamelCase : Optional[Any] = get_results(_UpperCAmelCase )
self.assertLess(result["eval_perplexity"] , 4_2 )
@slow
def a_ (self ) -> Dict:
__UpperCamelCase : Dict = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Tuple = f"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_t5_mlm_flax.main()
__UpperCamelCase : Tuple = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.42 )
@slow
def a_ (self ) -> Union[str, Any]:
# with so little data, distributed training needs more epochs to reach a score on par with 0/1 GPU
__UpperCamelCase : Union[str, Any] = 7 if get_gpu_count() > 1 else 2
__UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Optional[Any] = f"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_flax_ner.main()
__UpperCamelCase : int = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertGreaterEqual(result["eval_f1"] , 0.3 )
@slow
def a_ (self ) -> List[Any]:
__UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Dict = f"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_qa.main()
__UpperCamelCase : List[Any] = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_f1"] , 3_0 )
self.assertGreaterEqual(result["eval_exact"] , 3_0 )
| 298
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowerCAmelCase = {
'''configuration_convnext''': ['''CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvNextConfig''', '''ConvNextOnnxConfig''']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ['''ConvNextFeatureExtractor''']
_lowerCAmelCase = ['''ConvNextImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
'''CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvNextForImageClassification''',
'''ConvNextModel''',
'''ConvNextPreTrainedModel''',
'''ConvNextBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
'''TFConvNextForImageClassification''',
'''TFConvNextModel''',
'''TFConvNextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 298
|
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class A :
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase=9_9 , _UpperCAmelCase=1_3 , _UpperCAmelCase=1_6 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=2 , _UpperCAmelCase=3_2 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase=3_0 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=None , ) -> int:
__UpperCamelCase : List[str] = parent
__UpperCamelCase : str = batch_size
__UpperCamelCase : str = decoder_seq_length
# For common tests
__UpperCamelCase : Optional[int] = self.decoder_seq_length
__UpperCamelCase : Any = is_training
__UpperCamelCase : Tuple = use_attention_mask
__UpperCamelCase : Optional[int] = use_labels
__UpperCamelCase : Dict = vocab_size
__UpperCamelCase : Optional[int] = d_model
__UpperCamelCase : Union[str, Any] = d_model
__UpperCamelCase : int = decoder_layers
__UpperCamelCase : Dict = decoder_layers
__UpperCamelCase : str = decoder_ffn_dim
__UpperCamelCase : Optional[Any] = decoder_attention_heads
__UpperCamelCase : Optional[Any] = decoder_attention_heads
__UpperCamelCase : List[Any] = eos_token_id
__UpperCamelCase : int = bos_token_id
__UpperCamelCase : Tuple = pad_token_id
__UpperCamelCase : Tuple = decoder_start_token_id
__UpperCamelCase : Dict = use_cache
__UpperCamelCase : Optional[Any] = max_position_embeddings
__UpperCamelCase : int = None
__UpperCamelCase : Optional[int] = decoder_seq_length
__UpperCamelCase : Optional[int] = 2
__UpperCamelCase : Optional[int] = 1
def a_ (self ) -> List[Any]:
__UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
__UpperCamelCase : int = None
if self.use_attention_mask:
__UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
__UpperCamelCase : List[str] = None
if self.use_labels:
__UpperCamelCase : int = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
__UpperCamelCase : Optional[Any] = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> Optional[Any]:
__UpperCamelCase : List[Any] = True
__UpperCamelCase : Optional[Any] = TrOCRDecoder(config=_UpperCAmelCase ).to(_UpperCAmelCase ).eval()
__UpperCamelCase : Optional[Any] = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
__UpperCamelCase : str = model(_UpperCAmelCase , use_cache=_UpperCAmelCase )
__UpperCamelCase : List[Any] = model(_UpperCAmelCase )
__UpperCamelCase : Optional[int] = model(_UpperCAmelCase , use_cache=_UpperCAmelCase )
self.parent.assertTrue(len(_UpperCAmelCase ) == len(_UpperCAmelCase ) )
self.parent.assertTrue(len(_UpperCAmelCase ) == len(_UpperCAmelCase ) + 1 )
__UpperCamelCase : List[Any] = outputs["past_key_values"]
# create hypothetical next token and extend to next_input_ids
__UpperCamelCase : Optional[int] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append the next tokens to input_ids
__UpperCamelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 )
__UpperCamelCase : Tuple = model(_UpperCAmelCase )["last_hidden_state"]
__UpperCamelCase : Any = model(_UpperCAmelCase , past_key_values=_UpperCAmelCase )["last_hidden_state"]
# select random slice
__UpperCamelCase : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__UpperCamelCase : Dict = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
__UpperCamelCase : Optional[int] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 )
def a_ (self ) -> Optional[Any]:
__UpperCamelCase : List[str] = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Any = config_and_inputs
__UpperCamelCase : str = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
A = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
A = (TrOCRForCausalLM,) if is_torch_available() else ()
A = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
A = True
A = False
def a_ (self ) -> List[str]:
__UpperCamelCase : Optional[int] = TrOCRStandaloneDecoderModelTester(self , is_training=_UpperCAmelCase )
__UpperCamelCase : Dict = ConfigTester(self , config_class=_UpperCAmelCase )
def a_ (self ) -> Dict:
pass
def a_ (self ) -> Optional[int]:
pass
def a_ (self ) -> Optional[Any]:
pass
def a_ (self ) -> Dict:
self.config_tester.run_common_tests()
def a_ (self ) -> List[Any]:
__UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*_UpperCAmelCase )
def a_ (self ) -> Any:
return
@unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :)
def a_ (self ) -> Tuple:
pass
| 298
| 1
|
'''simple docstring'''
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
_lowerCAmelCase = get_logger(__name__)
class A ( enum.Enum ):
'''simple docstring'''
A = "all_checks"
A = "basic_checks"
A = "no_checks"
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__=None ):
if expected_checksums is None:
logger.info("Unable to verify checksums." )
return
if len(set(snake_case__ ) - set(snake_case__ ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(snake_case__ ) - set(snake_case__ ) ) )
if len(set(snake_case__ ) - set(snake_case__ ) ) > 0:
raise UnexpectedDownloadedFile(str(set(snake_case__ ) - set(snake_case__ ) ) )
__UpperCamelCase : Optional[Any] = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
__UpperCamelCase : int = " for " + verification_name if verification_name is not None else ""
if len(snake_case__ ) > 0:
raise NonMatchingChecksumError(
F"Checksums didn't match{for_verification_name}:\n"
F"{bad_urls}\n"
"Set `verification_mode='no_checks'` to skip checksums verification and ignore this error" )
logger.info("All the checksums matched successfully" + for_verification_name )
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __lowerCAmelCase ( snake_case__ , snake_case__ ):
if expected_splits is None:
logger.info("Unable to verify splits sizes." )
return
if len(set(snake_case__ ) - set(snake_case__ ) ) > 0:
raise ExpectedMoreSplits(str(set(snake_case__ ) - set(snake_case__ ) ) )
if len(set(snake_case__ ) - set(snake_case__ ) ) > 0:
raise UnexpectedSplits(str(set(snake_case__ ) - set(snake_case__ ) ) )
__UpperCamelCase : Dict = [
{"expected": expected_splits[name], "recorded": recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(snake_case__ ) > 0:
raise NonMatchingSplitsSizesError(str(snake_case__ ) )
logger.info("All the splits matched successfully." )
def __lowerCAmelCase ( snake_case__ , snake_case__ = True ):
if record_checksum:
__UpperCamelCase : int = shaaaa()
with open(snake_case__ , "rb" ) as f:
for chunk in iter(lambda: f.read(1 << 20 ) , b"" ):
m.update(snake_case__ )
__UpperCamelCase : Optional[int] = m.hexdigest()
else:
__UpperCamelCase : Union[str, Any] = None
return {"num_bytes": os.path.getsize(snake_case__ ), "checksum": checksum}
def __lowerCAmelCase ( snake_case__ ):
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
| 298
|
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = '''Hello, World!'''
_lowerCAmelCase = '''en_XX'''
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
__UpperCamelCase : Union[str, Any] = Path("data_bin" )
__UpperCamelCase : Union[str, Any] = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(snake_case__ ).parent ) , checkpoint_file=Path(snake_case__ ).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(snake_case__ ) , bpe="sentencepiece" , sentencepiece_model=str(Path(snake_case__ ).parent / "sentencepiece.bpe.model" ) , src_dict=str(data_dir / "dict.txt" ) , )
xmod.eval() # disable dropout
print(snake_case__ )
__UpperCamelCase : List[str] = xmod.model.encoder.sentence_encoder
__UpperCamelCase : Optional[int] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
__UpperCamelCase : Any = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our X-MOD config:" , snake_case__ )
__UpperCamelCase : Dict = XmodForSequenceClassification(snake_case__ ) if classification_head else XmodForMaskedLM(snake_case__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
__UpperCamelCase : List[Any] = xmod_sent_encoder.embed_tokens.weight
__UpperCamelCase : List[Any] = xmod_sent_encoder.embed_positions.weight
__UpperCamelCase : str = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
__UpperCamelCase : Any = xmod_sent_encoder.layernorm_embedding.weight
__UpperCamelCase : str = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__UpperCamelCase : int = model.roberta.encoder.layer[i]
__UpperCamelCase : Any = xmod_sent_encoder.layers[i]
# self attention
__UpperCamelCase : List[str] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("Dimensions of self-attention weights do not match." )
__UpperCamelCase : Dict = xmod_layer.self_attn.q_proj.weight
__UpperCamelCase : Optional[Any] = xmod_layer.self_attn.q_proj.bias
__UpperCamelCase : Any = xmod_layer.self_attn.k_proj.weight
__UpperCamelCase : Tuple = xmod_layer.self_attn.k_proj.bias
__UpperCamelCase : Union[str, Any] = xmod_layer.self_attn.v_proj.weight
__UpperCamelCase : Any = xmod_layer.self_attn.v_proj.bias
# self-attention output
__UpperCamelCase : Optional[int] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("Dimensions of self-attention output weights do not match." )
__UpperCamelCase : Union[str, Any] = xmod_layer.self_attn.out_proj.weight
__UpperCamelCase : str = xmod_layer.self_attn.out_proj.bias
__UpperCamelCase : Dict = xmod_layer.self_attn_layer_norm.weight
__UpperCamelCase : Any = xmod_layer.self_attn_layer_norm.bias
# intermediate
__UpperCamelCase : Dict = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of intermediate weights do not match." )
__UpperCamelCase : List[Any] = xmod_layer.fca.weight
__UpperCamelCase : Optional[int] = xmod_layer.fca.bias
# output
__UpperCamelCase : List[Any] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of feed-forward weights do not match." )
__UpperCamelCase : Tuple = xmod_layer.fca.weight
__UpperCamelCase : int = xmod_layer.fca.bias
__UpperCamelCase : Dict = xmod_layer.final_layer_norm.weight
__UpperCamelCase : int = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
__UpperCamelCase : Any = xmod_layer.adapter_layer_norm.weight
__UpperCamelCase : int = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("Lists of language adapters do not match." )
for lang_code, adapter in xmod_layer.adapter_modules.items():
__UpperCamelCase : Any = bert_output.adapter_modules[lang_code]
__UpperCamelCase : Dict = xmod_layer.adapter_modules[lang_code]
__UpperCamelCase : int = from_adapter.fca.weight
__UpperCamelCase : Dict = from_adapter.fca.bias
__UpperCamelCase : List[Any] = from_adapter.fca.weight
__UpperCamelCase : int = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
__UpperCamelCase : Tuple = xmod_sent_encoder.layer_norm.weight
__UpperCamelCase : List[Any] = xmod_sent_encoder.layer_norm.bias
if classification_head:
__UpperCamelCase : Optional[Any] = xmod.model.classification_heads["mnli"].dense.weight
__UpperCamelCase : Any = xmod.model.classification_heads["mnli"].dense.bias
__UpperCamelCase : Tuple = xmod.model.classification_heads["mnli"].out_proj.weight
__UpperCamelCase : List[Any] = xmod.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
__UpperCamelCase : Any = xmod.model.encoder.lm_head.dense.weight
__UpperCamelCase : Optional[Any] = xmod.model.encoder.lm_head.dense.bias
__UpperCamelCase : Tuple = xmod.model.encoder.lm_head.layer_norm.weight
__UpperCamelCase : List[Any] = xmod.model.encoder.lm_head.layer_norm.bias
__UpperCamelCase : Tuple = xmod.model.encoder.lm_head.weight
__UpperCamelCase : Any = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
__UpperCamelCase : Any = xmod.encode(snake_case__ ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(snake_case__ )
__UpperCamelCase : Optional[Any] = model(snake_case__ )[0]
if classification_head:
__UpperCamelCase : int = xmod.model.classification_heads["mnli"](xmod.extract_features(snake_case__ ) )
else:
__UpperCamelCase : Optional[Any] = xmod.model(snake_case__ , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
__UpperCamelCase : Dict = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7
__UpperCamelCase : Union[str, Any] = torch.allclose(snake_case__ , snake_case__ , atol=1E-3 )
print("Do both models output the same tensors?" , "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
Path(snake_case__ ).mkdir(parents=snake_case__ , exist_ok=snake_case__ )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
_lowerCAmelCase = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
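# Example invocation (script name and paths are placeholders; the flags match the parser above):
# python <this_script>.py \
#     --xmod_checkpoint_path /path/to/model.pt \
#     --pytorch_dump_folder_path /path/to/output \
#     [--classification_head]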
| 298
| 1
|
'''simple docstring'''
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class A ( nn.Module ):
'''simple docstring'''
def __init__(self ) -> Dict:
super().__init__()
__UpperCamelCase : int = nn.Linear(3 , 4 )
__UpperCamelCase : List[str] = nn.BatchNormad(4 )
__UpperCamelCase : str = nn.Linear(4 , 5 )
def a_ (self , _UpperCAmelCase ) -> Dict:
return self.lineara(self.batchnorm(self.lineara(_UpperCAmelCase ) ) )
class A ( unittest.TestCase ):
'''simple docstring'''
def a_ (self ) -> Optional[Any]:
__UpperCamelCase : int = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_UpperCAmelCase , model.state_dict() )
__UpperCamelCase : int = os.path.join(_UpperCAmelCase , "index.json" )
self.assertTrue(os.path.isfile(_UpperCAmelCase ) )
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
__UpperCamelCase : str = os.path.join(_UpperCAmelCase , f"{key}.dat" )
self.assertTrue(os.path.isfile(_UpperCAmelCase ) )
# TODO: add tests on the fact weights are properly loaded
def a_ (self ) -> Dict:
__UpperCamelCase : Optional[Any] = [torch.floataa, torch.floataa, torch.bfloataa]
for dtype in dtypes:
__UpperCamelCase : Dict = torch.randn(2 , 3 , dtype=_UpperCAmelCase )
with TemporaryDirectory() as tmp_dir:
__UpperCamelCase : List[Any] = offload_weight(_UpperCAmelCase , "weight" , _UpperCAmelCase , {} )
__UpperCamelCase : Any = os.path.join(_UpperCAmelCase , "weight.dat" )
self.assertTrue(os.path.isfile(_UpperCAmelCase ) )
self.assertDictEqual(_UpperCAmelCase , {"weight": {"shape": [2, 3], "dtype": str(_UpperCAmelCase ).split("." )[1]}} )
__UpperCamelCase : Tuple = load_offloaded_weight(_UpperCAmelCase , index["weight"] )
self.assertTrue(torch.equal(_UpperCAmelCase , _UpperCAmelCase ) )
def a_ (self ) -> List[str]:
__UpperCamelCase : Optional[Any] = ModelForTest()
__UpperCamelCase : Union[str, Any] = model.state_dict()
__UpperCamelCase : Optional[Any] = {k: v for k, v in state_dict.items() if "linear2" not in k}
__UpperCamelCase : str = {k: v for k, v in state_dict.items() if "linear2" in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_UpperCAmelCase , _UpperCAmelCase )
__UpperCamelCase : Union[str, Any] = OffloadedWeightsLoader(state_dict=_UpperCAmelCase , save_folder=_UpperCAmelCase )
# Every key is there with the right value
self.assertEqual(sorted(_UpperCAmelCase ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(_UpperCAmelCase , weight_map[key] ) )
__UpperCamelCase : Optional[Any] = {k: v for k, v in state_dict.items() if "weight" in k}
__UpperCamelCase : Tuple = {k: v for k, v in state_dict.items() if "weight" not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_UpperCAmelCase , _UpperCAmelCase )
__UpperCamelCase : int = OffloadedWeightsLoader(state_dict=_UpperCAmelCase , save_folder=_UpperCAmelCase )
# Every key is there with the right value
self.assertEqual(sorted(_UpperCAmelCase ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(_UpperCAmelCase , weight_map[key] ) )
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_UpperCAmelCase , _UpperCAmelCase )
# Duplicates are removed
__UpperCamelCase : int = OffloadedWeightsLoader(state_dict=_UpperCAmelCase , save_folder=_UpperCAmelCase )
# Every key is there with the right value
self.assertEqual(sorted(_UpperCAmelCase ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(_UpperCAmelCase , weight_map[key] ) )
def a_ (self ) -> Optional[Any]:
__UpperCamelCase : List[Any] = {"a.1": 0, "a.10": 1, "a.2": 2}
__UpperCamelCase : int = extract_submodules_state_dict(_UpperCAmelCase , ["a.1", "a.2"] )
self.assertDictEqual(_UpperCAmelCase , {"a.1": 0, "a.2": 2} )
__UpperCamelCase : int = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
__UpperCamelCase : int = extract_submodules_state_dict(_UpperCAmelCase , ["a.1", "a.2"] )
self.assertDictEqual(_UpperCAmelCase , {"a.1.a": 0, "a.2.a": 2} )
| 298
|
'''simple docstring'''
def __lowerCAmelCase ( snake_case__ ):
return [
snake_case__[:a] + snake_case__[a].upper() + snake_case__[a + 1 :]
for a in range(len(snake_case__ ) )
if snake_case__[a].isalpha()
]
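# Expected behaviour: every variant of the input with exactly one alphabetic character
# upper-cased; non-alphabetic positions are skipped.
assert __lowerCAmelCase("abc") == ["Abc", "aBc", "abC"]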
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 298
| 1
|
'''simple docstring'''
import numpy as np
import datasets
_lowerCAmelCase = '''
Compute the Mahalanobis Distance
Mahalanobis distance is the distance between a point and a distribution,
not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
'''
_lowerCAmelCase = '''\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
'''
_lowerCAmelCase = '''
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
 mahalanobis: The Mahalanobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{\'mahalanobis\': array([0.5])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
'''simple docstring'''
def a_ (self ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
} ) , )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase ) -> int:
# convert to numpy arrays
__UpperCamelCase : List[str] = np.array(_UpperCAmelCase )
__UpperCamelCase : Optional[Any] = np.array(_UpperCAmelCase )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("Expected `X` to be a 2D vector" )
if len(reference_distribution.shape ) != 2:
raise ValueError("Expected `reference_distribution` to be a 2D vector" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )
# Get mahalanobis distance for each prediction
__UpperCamelCase : Union[str, Any] = X - np.mean(_UpperCAmelCase )
__UpperCamelCase : int = np.cov(reference_distribution.T )
try:
__UpperCamelCase : str = np.linalg.inv(_UpperCAmelCase )
except np.linalg.LinAlgError:
__UpperCamelCase : str = np.linalg.pinv(_UpperCAmelCase )
__UpperCamelCase : int = np.dot(_UpperCAmelCase , _UpperCAmelCase )
__UpperCamelCase : int = np.dot(_UpperCAmelCase , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 298
|
'''simple docstring'''
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
def count_of_possible_combinations(snake_case__ ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(snake_case__ )
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
def count_of_possible_combinations_with_dp_array(
snake_case__ , snake_case__ ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
__UpperCamelCase : Any = sum(
count_of_possible_combinations_with_dp_array(target - item , snake_case__ )
for item in array )
__UpperCamelCase : List[str] = answer
return answer
__UpperCamelCase : Optional[int] = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(snake_case__ , snake_case__ )
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
__UpperCamelCase : Optional[int] = [0] * (target + 1)
__UpperCamelCase : Tuple = 1
for i in range(1 , target + 1 ):
for j in range(snake_case__ ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
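# DP trace for the demo below with array=[1, 2, 5] and target=5 (order of items matters):
# dp_array = [1, 1, 2, 3, 5, 9], so the script prints 9.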
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase = 3
_lowerCAmelCase = 5
_lowerCAmelCase = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 298
| 1
|
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def __lowerCAmelCase ( snake_case__ ):
__UpperCamelCase : Optional[int] = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def __lowerCAmelCase ( snake_case__ ):
__UpperCamelCase : Dict = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
__UpperCamelCase : Dict = s_dict.pop(snake_case__ )
elif "subsample" in key:
__UpperCamelCase : List[str] = s_dict.pop(snake_case__ )
def __lowerCAmelCase ( snake_case__ ):
__UpperCamelCase , __UpperCamelCase : List[Any] = emb.weight.shape
__UpperCamelCase : Tuple = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ )
__UpperCamelCase : Union[str, Any] = emb.weight.data
return lin_layer
def __lowerCAmelCase ( snake_case__ , snake_case__ ):
__UpperCamelCase : int = torch.load(snake_case__ , map_location="cpu" )
__UpperCamelCase : Any = mam_aaa["args"]
__UpperCamelCase : int = mam_aaa["model"]
__UpperCamelCase : Any = state_dict["decoder.output_projection.weight"]
remove_ignore_keys_(snake_case__ )
rename_keys(snake_case__ )
__UpperCamelCase : Dict = state_dict["decoder.embed_tokens.weight"].shape[0]
__UpperCamelCase : int = args.share_decoder_input_output_embed
__UpperCamelCase : str = [int(snake_case__ ) for i in args.conv_kernel_sizes.split("," )]
__UpperCamelCase : Optional[int] = SpeechaTextConfig(
vocab_size=snake_case__ , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , num_conv_layers=len(snake_case__ ) , conv_channels=args.conv_channels , conv_kernel_sizes=snake_case__ , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=snake_case__ , num_beams=5 , max_length=200 , use_cache=snake_case__ , decoder_start_token_id=2 , early_stopping=snake_case__ , )
__UpperCamelCase : List[Any] = SpeechaTextForConditionalGeneration(snake_case__ )
__UpperCamelCase , __UpperCamelCase : Tuple = model.model.load_state_dict(snake_case__ , strict=snake_case__ )
if len(snake_case__ ) > 0 and not set(snake_case__ ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
F" but all the following weights are missing {missing}" )
if tie_embeds:
__UpperCamelCase : Dict = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
__UpperCamelCase : int = lm_head_weights
model.save_pretrained(snake_case__ )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--fairseq_path''', type=str, help='''Path to the fairseq model (.pt) file.''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
_lowerCAmelCase = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
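# Example invocation (script name and paths are placeholders; the flags match the parser above):
# python <this_script>.py --fairseq_path /path/to/s2t_checkpoint.pt --pytorch_dump_folder_path /path/to/output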
| 298
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
_lowerCAmelCase = abspath(join(dirname(dirname(dirname(__file__))), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def __lowerCAmelCase ( snake_case__ ):
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(snake_case__ )
def __lowerCAmelCase ( snake_case__ ):
from transformers.testing_utils import pytest_terminal_summary_main
__UpperCamelCase : int = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(snake_case__ , id=snake_case__ )
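# With these hooks in place, running `pytest --make-reports=<id>` makes
# transformers.testing_utils.pytest_terminal_summary_main write its per-run report files at the
# end of the session; without the flag the hook is a no-op.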
| 298
| 1
|
'''simple docstring'''
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
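# Note on the guard pattern above: when a backend is unavailable, the matching dummy_* module is
# star-imported instead, substituting placeholder classes that raise an informative ImportError
# only when instantiated, so importing this package itself never fails outright.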
| 298
|
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class A ( unittest.TestCase ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = 3_2 , _UpperCAmelCase = True , _UpperCAmelCase = 1 / 2_5_5 , _UpperCAmelCase = True , _UpperCAmelCase = True , _UpperCAmelCase = [0.48_145_466, 0.4_578_275, 0.40_821_073] , _UpperCAmelCase = [0.26_862_954, 0.26_130_258, 0.27_577_711] , _UpperCAmelCase = True , _UpperCAmelCase=7 , _UpperCAmelCase=3_0 , _UpperCAmelCase=4_0_0 , _UpperCAmelCase=3 , ) -> Dict:
__UpperCamelCase : Dict = parent
__UpperCamelCase : Any = do_resize
__UpperCamelCase : Union[str, Any] = size if size is not None else {"shortest_edge": 2_8_8}
__UpperCamelCase : Any = size_divisor
__UpperCamelCase : Optional[int] = do_rescale
__UpperCamelCase : Union[str, Any] = rescale_factor
__UpperCamelCase : int = do_normalize
__UpperCamelCase : List[Any] = do_center_crop
__UpperCamelCase : Optional[int] = image_mean
__UpperCamelCase : Tuple = image_std
__UpperCamelCase : Tuple = do_pad
__UpperCamelCase : Tuple = batch_size
__UpperCamelCase : Dict = num_channels
__UpperCamelCase : Dict = min_resolution
__UpperCamelCase : Optional[Any] = max_resolution
def a_ (self ) -> Optional[int]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def a_ (self , _UpperCAmelCase , _UpperCAmelCase=False ) -> Optional[Any]:
if not batched:
__UpperCamelCase : List[str] = self.size["shortest_edge"]
__UpperCamelCase : Optional[int] = image_inputs[0]
if isinstance(_UpperCAmelCase , Image.Image ):
__UpperCamelCase , __UpperCamelCase : Optional[Any] = image.size
else:
__UpperCamelCase , __UpperCamelCase : Union[str, Any] = image.shape[1], image.shape[2]
__UpperCamelCase : Dict = size / min(_UpperCAmelCase , _UpperCAmelCase )
if h < w:
__UpperCamelCase , __UpperCamelCase : Tuple = size, scale * w
else:
__UpperCamelCase , __UpperCamelCase : List[Any] = scale * h, size
__UpperCamelCase : List[Any] = int((1_3_3_3 / 8_0_0) * size )
if max(_UpperCAmelCase , _UpperCAmelCase ) > max_size:
__UpperCamelCase : str = max_size / max(_UpperCAmelCase , _UpperCAmelCase )
__UpperCamelCase : Dict = newh * scale
__UpperCamelCase : Union[str, Any] = neww * scale
__UpperCamelCase , __UpperCamelCase : Optional[int] = int(newh + 0.5 ), int(neww + 0.5 )
__UpperCamelCase , __UpperCamelCase : Optional[int] = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
__UpperCamelCase : int = []
for image in image_inputs:
__UpperCamelCase , __UpperCamelCase : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__UpperCamelCase : Tuple = max(_UpperCAmelCase , key=lambda _UpperCAmelCase : item[0] )[0]
__UpperCamelCase : Union[str, Any] = max(_UpperCAmelCase , key=lambda _UpperCAmelCase : item[1] )[1]
return expected_height, expected_width
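# Worked example of the resize rule above (assuming size=288 and size_divisor=32): a 480x640
# image scales by 288/480 = 0.6 -> (288, 384); the cap max_size = int(1333/800 * 288) = 479 does
# not trigger, and both dims are already multiples of 32, so expected (height, width) = (288, 384).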
@require_torch
@require_vision
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
A = BridgeTowerImageProcessor if is_vision_available() else None
def a_ (self ) -> Dict:
__UpperCamelCase : Optional[Any] = BridgeTowerImageProcessingTester(self )
@property
def a_ (self ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def a_ (self ) -> Union[str, Any]:
__UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , "image_mean" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "image_std" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "do_resize" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "size" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "size_divisor" ) )
def a_ (self ) -> List[str]:
pass
def a_ (self ) -> List[Any]:
# Initialize image processor
__UpperCamelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
__UpperCamelCase : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__UpperCamelCase , __UpperCamelCase : List[str] = self.image_processor_tester.get_expected_values(_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCamelCase : Optional[int] = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values
__UpperCamelCase , __UpperCamelCase : List[str] = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a_ (self ) -> Tuple:
# Initialize image processor
__UpperCamelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCamelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
__UpperCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__UpperCamelCase , __UpperCamelCase : Optional[Any] = self.image_processor_tester.get_expected_values(_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCamelCase : List[Any] = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values
__UpperCamelCase , __UpperCamelCase : int = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a_ (self ) -> int:
# Initialize image processor
__UpperCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
__UpperCamelCase : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__UpperCamelCase , __UpperCamelCase : int = self.image_processor_tester.get_expected_values(_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCamelCase : Optional[Any] = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values
__UpperCamelCase , __UpperCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 298
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {'''vocab_file''': '''spiece.model'''}
_lowerCAmelCase = {
'''vocab_file''': {
'''bert_for_seq_generation''': (
'''https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'''
),
}
}
_lowerCAmelCase = {'''bert_for_seq_generation''': 512}
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = []
A = ["input_ids", "attention_mask"]
def __init__(self , _UpperCAmelCase , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<::::>" , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> None:
__UpperCamelCase : Any = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
__UpperCamelCase : List[Any] = vocab_file
__UpperCamelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCAmelCase )
@property
def a_ (self ) -> Any:
return self.sp_model.get_piece_size()
def a_ (self ) -> int:
__UpperCamelCase : Optional[Any] = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self ) -> Optional[int]:
__UpperCamelCase : Dict = self.__dict__.copy()
__UpperCamelCase : List[str] = None
return state
def __setstate__(self , _UpperCAmelCase ) -> List[str]:
__UpperCamelCase : List[str] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__UpperCamelCase : Dict = {}
__UpperCamelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a_ (self , _UpperCAmelCase ) -> List[str]:
return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
def a_ (self , _UpperCAmelCase ) -> Union[str, Any]:
return self.sp_model.piece_to_id(_UpperCAmelCase )
def a_ (self , _UpperCAmelCase ) -> str:
__UpperCamelCase : Union[str, Any] = self.sp_model.IdToPiece(_UpperCAmelCase )
return token
def a_ (self , _UpperCAmelCase ) -> int:
__UpperCamelCase : List[str] = []
__UpperCamelCase : Tuple = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_UpperCAmelCase ) + token
__UpperCamelCase : Any = []
else:
current_sub_tokens.append(_UpperCAmelCase )
out_string += self.sp_model.decode(_UpperCAmelCase )
return out_string.strip()
def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
__UpperCamelCase : Tuple = os.path.join(
_UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCAmelCase , "wb" ) as fi:
__UpperCamelCase : Tuple = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (out_vocab_file,)
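# Hedged usage sketch (the concrete transformers class is BertGenerationTokenizer; the checkpoint
# name comes from the vocab map above):
# tok = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
# tok.tokenize("Hello world")  # -> SentencePiece pieces such as ['▁Hello', '▁world']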
| 298
|
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def __lowerCAmelCase ( snake_case__ , snake_case__ ):
__UpperCamelCase : List[Any] = {
"attention_cell": "multi_head",
"num_layers": 4,
"units": 1_024,
"hidden_size": 768,
"max_length": 512,
"num_heads": 8,
"scaled": True,
"dropout": 0.1,
"use_residual": True,
"embed_size": 1_024,
"embed_dropout": 0.1,
"word_embed": None,
"layer_norm_eps": 1E-5,
"token_type_vocab_size": 2,
}
__UpperCamelCase : Optional[int] = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
__UpperCamelCase : Any = BERTEncoder(
attention_cell=predefined_args["attention_cell"] , num_layers=predefined_args["num_layers"] , units=predefined_args["units"] , hidden_size=predefined_args["hidden_size"] , max_length=predefined_args["max_length"] , num_heads=predefined_args["num_heads"] , scaled=predefined_args["scaled"] , dropout=predefined_args["dropout"] , output_attention=snake_case__ , output_all_encodings=snake_case__ , use_residual=predefined_args["use_residual"] , activation=predefined_args.get("activation" , "gelu" ) , layer_norm_eps=predefined_args.get("layer_norm_eps" , snake_case__ ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
__UpperCamelCase : str = "openwebtext_ccnews_stories_books_cased"
# Specify download folder to Gluonnlp's vocab
__UpperCamelCase : Tuple = os.path.join(get_home_dir() , "models" )
__UpperCamelCase : Union[str, Any] = _load_vocab(snake_case__ , snake_case__ , snake_case__ , cls=snake_case__ )
__UpperCamelCase : Union[str, Any] = nlp.model.BERTModel(
snake_case__ , len(snake_case__ ) , units=predefined_args["units"] , embed_size=predefined_args["embed_size"] , embed_dropout=predefined_args["embed_dropout"] , word_embed=predefined_args["word_embed"] , use_pooler=snake_case__ , use_token_type_embed=snake_case__ , token_type_vocab_size=predefined_args["token_type_vocab_size"] , use_classifier=snake_case__ , use_decoder=snake_case__ , )
original_bort.load_parameters(snake_case__ , cast_dtype=snake_case__ , ignore_extra=snake_case__ )
__UpperCamelCase : int = original_bort._collect_params_with_prefix()
# Build our config 🤗
__UpperCamelCase : Any = {
"architectures": ["BertForMaskedLM"],
"attention_probs_dropout_prob": predefined_args["dropout"],
"hidden_act": "gelu",
"hidden_dropout_prob": predefined_args["dropout"],
"hidden_size": predefined_args["embed_size"],
"initializer_range": 0.02,
"intermediate_size": predefined_args["hidden_size"],
"layer_norm_eps": predefined_args["layer_norm_eps"],
"max_position_embeddings": predefined_args["max_length"],
"model_type": "bort",
"num_attention_heads": predefined_args["num_heads"],
"num_hidden_layers": predefined_args["num_layers"],
"pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
"type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
"vocab_size": len(snake_case__ ),
}
__UpperCamelCase : List[str] = BertConfig.from_dict(snake_case__ )
__UpperCamelCase : str = BertForMaskedLM(snake_case__ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(snake_case__ ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(snake_case__ , snake_case__ ):
__UpperCamelCase : Any = hf_param.shape
__UpperCamelCase : List[Any] = to_torch(params[gluon_param] )
__UpperCamelCase : Union[str, Any] = gluon_param.shape
assert (
shape_hf == shape_gluon
), F"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"
return gluon_param
__UpperCamelCase : Tuple = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , "word_embed.0.weight" )
__UpperCamelCase : str = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , "encoder.position_weight" )
__UpperCamelCase : Optional[int] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , "encoder.layer_norm.beta" )
__UpperCamelCase : str = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , "encoder.layer_norm.gamma" )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
__UpperCamelCase : Any = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
__UpperCamelCase : BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
__UpperCamelCase : BertSelfAttention = layer.attention.self
__UpperCamelCase : int = check_and_map_params(
self_attn.key.bias.data , F"encoder.transformer_cells.{i}.attention_cell.proj_key.bias" )
__UpperCamelCase : List[str] = check_and_map_params(
self_attn.key.weight.data , F"encoder.transformer_cells.{i}.attention_cell.proj_key.weight" )
__UpperCamelCase : str = check_and_map_params(
self_attn.query.bias.data , F"encoder.transformer_cells.{i}.attention_cell.proj_query.bias" )
__UpperCamelCase : List[Any] = check_and_map_params(
self_attn.query.weight.data , F"encoder.transformer_cells.{i}.attention_cell.proj_query.weight" )
__UpperCamelCase : List[str] = check_and_map_params(
self_attn.value.bias.data , F"encoder.transformer_cells.{i}.attention_cell.proj_value.bias" )
__UpperCamelCase : Tuple = check_and_map_params(
self_attn.value.weight.data , F"encoder.transformer_cells.{i}.attention_cell.proj_value.weight" )
# self attention output
__UpperCamelCase : BertSelfOutput = layer.attention.output
__UpperCamelCase : List[Any] = check_and_map_params(
self_output.dense.bias , F"encoder.transformer_cells.{i}.proj.bias" )
__UpperCamelCase : List[Any] = check_and_map_params(
self_output.dense.weight , F"encoder.transformer_cells.{i}.proj.weight" )
__UpperCamelCase : List[Any] = check_and_map_params(
self_output.LayerNorm.bias , F"encoder.transformer_cells.{i}.layer_norm.beta" )
__UpperCamelCase : Optional[int] = check_and_map_params(
self_output.LayerNorm.weight , F"encoder.transformer_cells.{i}.layer_norm.gamma" )
# intermediate
__UpperCamelCase : BertIntermediate = layer.intermediate
__UpperCamelCase : Dict = check_and_map_params(
intermediate.dense.bias , F"encoder.transformer_cells.{i}.ffn.ffn_1.bias" )
__UpperCamelCase : List[Any] = check_and_map_params(
intermediate.dense.weight , F"encoder.transformer_cells.{i}.ffn.ffn_1.weight" )
# output
__UpperCamelCase : BertOutput = layer.output
__UpperCamelCase : Dict = check_and_map_params(
bert_output.dense.bias , F"encoder.transformer_cells.{i}.ffn.ffn_2.bias" )
__UpperCamelCase : Union[str, Any] = check_and_map_params(
bert_output.dense.weight , F"encoder.transformer_cells.{i}.ffn.ffn_2.weight" )
__UpperCamelCase : List[str] = check_and_map_params(
bert_output.LayerNorm.bias , F"encoder.transformer_cells.{i}.ffn.layer_norm.beta" )
__UpperCamelCase : int = check_and_map_params(
bert_output.LayerNorm.weight , F"encoder.transformer_cells.{i}.ffn.layer_norm.gamma" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
__UpperCamelCase : Any = RobertaTokenizer.from_pretrained("roberta-base" )
__UpperCamelCase : int = tokenizer.encode_plus(snake_case__ )["input_ids"]
# Get gluon output
__UpperCamelCase : Dict = mx.nd.array([input_ids] )
__UpperCamelCase : Any = original_bort(inputs=snake_case__ , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(snake_case__ )
__UpperCamelCase : Optional[Any] = BertModel.from_pretrained(snake_case__ )
hf_bort_model.eval()
__UpperCamelCase : str = tokenizer.encode_plus(snake_case__ , return_tensors="pt" )
__UpperCamelCase : Dict = hf_bort_model(**snake_case__ )[0]
__UpperCamelCase : List[Any] = output_gluon[0].asnumpy()
__UpperCamelCase : Optional[int] = output_hf[0].detach().numpy()
__UpperCamelCase : Dict = np.max(np.abs(hf_layer - gluon_layer ) ).item()
__UpperCamelCase : List[Any] = np.allclose(snake_case__ , snake_case__ , atol=1E-3 )
if success:
print("✔️ Both model do output the same tensors" )
else:
print("❌ Both model do **NOT** output the same tensors" )
print("Absolute difference is:" , snake_case__ )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowerCAmelCase = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
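# Example invocation (script name and paths are placeholders; the flags match the parser above):
# python <this_script>.py --bort_checkpoint_path /path/to/bort.params --pytorch_dump_folder_path /path/to/output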
| 298
| 1
|
'''simple docstring'''
_lowerCAmelCase = frozenset(
[
'''prompt''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
'''cross_attention_kwargs''',
]
)
_lowerCAmelCase = frozenset(['''prompt''', '''negative_prompt'''])
_lowerCAmelCase = frozenset([])
_lowerCAmelCase = frozenset(['''image'''])
_lowerCAmelCase = frozenset(
[
'''image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
_lowerCAmelCase = frozenset(['''image'''])
_lowerCAmelCase = frozenset(
[
'''prompt''',
'''image''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
]
)
_lowerCAmelCase = frozenset(['''prompt''', '''image''', '''negative_prompt'''])
_lowerCAmelCase = frozenset(
[
# Text guided image variation with an image mask
'''prompt''',
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
]
)
_lowerCAmelCase = frozenset(['''prompt''', '''image''', '''mask_image''', '''negative_prompt'''])
_lowerCAmelCase = frozenset(
[
# image variation with an image mask
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
_lowerCAmelCase = frozenset(['''image''', '''mask_image'''])
_lowerCAmelCase = frozenset(
[
'''example_image''',
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
_lowerCAmelCase = frozenset(['''example_image''', '''image''', '''mask_image'''])
_lowerCAmelCase = frozenset(['''class_labels'''])
_lowerCAmelCase = frozenset(['''class_labels'''])
_lowerCAmelCase = frozenset(['''batch_size'''])
_lowerCAmelCase = frozenset([])
_lowerCAmelCase = frozenset(['''batch_size'''])
_lowerCAmelCase = frozenset([])
_lowerCAmelCase = frozenset(
[
'''prompt''',
'''audio_length_in_s''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
'''cross_attention_kwargs''',
]
)
_lowerCAmelCase = frozenset(['''prompt''', '''negative_prompt'''])
_lowerCAmelCase = frozenset(['''input_tokens'''])
_lowerCAmelCase = frozenset(['''input_tokens'''])
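# These frozensets enumerate, per pipeline family, the accepted __call__ arguments and the subset
# that must also work batched; the mangled names above correspond to pairs such as
# TEXT_TO_IMAGE_PARAMS / TEXT_TO_IMAGE_BATCH_PARAMS in diffusers' shared pipeline tests
# (an assumption based on the upstream pipeline_params module).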
| 298
|
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class A ( datasets.BeamBasedBuilder ):
'''simple docstring'''
def a_ (self ) -> Tuple:
return datasets.DatasetInfo(
features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=_UpperCAmelCase , )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]
def a_ (self , _UpperCAmelCase , _UpperCAmelCase ) -> int:
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(_UpperCAmelCase )
class A ( datasets.BeamBasedBuilder ):
'''simple docstring'''
def a_ (self ) -> str:
return datasets.DatasetInfo(
features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=_UpperCAmelCase , )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
]
def a_ (self , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(_UpperCAmelCase )
def __lowerCAmelCase ( ):
return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )]
def __lowerCAmelCase ( ):
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )]
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
@require_beam
def a_ (self ) -> Union[str, Any]:
__UpperCamelCase : Union[str, Any] = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__UpperCamelCase : str = DummyBeamDataset(cache_dir=_UpperCAmelCase , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(_UpperCAmelCase , builder.name , "default" , "0.0.0" , f"{builder.name}-train.arrow" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
__UpperCamelCase : Optional[int] = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , _UpperCAmelCase )
self.assertEqual(dset["train"].info.splits["train"].num_examples , _UpperCAmelCase )
self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(_UpperCAmelCase , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def a_ (self ) -> Optional[Any]:
import apache_beam as beam
__UpperCamelCase : Optional[int] = beam.io.parquetio.WriteToParquet
__UpperCamelCase : List[str] = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__UpperCamelCase : Optional[int] = DummyBeamDataset(cache_dir=_UpperCAmelCase , beam_runner="DirectRunner" )
with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
__UpperCamelCase : List[str] = partial(_UpperCAmelCase , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
_UpperCAmelCase , builder.name , "default" , "0.0.0" , f"{builder.name}-train-00000-of-00002.arrow" ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
_UpperCAmelCase , builder.name , "default" , "0.0.0" , f"{builder.name}-train-00000-of-00002.arrow" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
__UpperCamelCase : List[str] = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , _UpperCAmelCase )
self.assertEqual(dset["train"].info.splits["train"].num_examples , _UpperCAmelCase )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
self.assertTrue(
os.path.exists(os.path.join(_UpperCAmelCase , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def a_ (self ) -> str:
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__UpperCamelCase : Optional[Any] = DummyBeamDataset(cache_dir=_UpperCAmelCase )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def a_ (self ) -> List[str]:
__UpperCamelCase : Tuple = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__UpperCamelCase : str = NestedBeamDataset(cache_dir=_UpperCAmelCase , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(_UpperCAmelCase , builder.name , "default" , "0.0.0" , f"{builder.name}-train.arrow" ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
__UpperCamelCase : Union[str, Any] = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , _UpperCAmelCase )
self.assertEqual(dset["train"].info.splits["train"].num_examples , _UpperCAmelCase )
self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(_UpperCAmelCase , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
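# A minimal standalone sketch of the prepare/read cycle the tests above exercise.
# Assumptions: the DummyBeamDataset builder and the `tempfile` import are defined
# earlier in this module; "DirectRunner" executes the Apache Beam pipeline in-process.
if __name__ == "__main__":
    with tempfile.TemporaryDirectory() as tmp_cache_dir:
        builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
        builder.download_and_prepare()  # runs the Beam pipeline and writes the Arrow files
        dset = builder.as_dataset()  # dict-like mapping: split name -> datasets.Dataset
        print(dset["train"].num_rows, dset["train"][0])
        del dset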
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_lowerCAmelCase = {
'''vocab_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
_lowerCAmelCase = {
'''yjernite/retribert-base-uncased''': 512,
}
_lowerCAmelCase = {
'''yjernite/retribert-base-uncased''': {'''do_lower_case''': True},
}
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = PRETRAINED_INIT_CONFIGURATION
A = RetriBertTokenizer
A = ["input_ids", "attention_mask"]
def __init__(self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase="[UNK]" , _UpperCAmelCase="[SEP]" , _UpperCAmelCase="[PAD]" , _UpperCAmelCase="[CLS]" , _UpperCAmelCase="[MASK]" , _UpperCAmelCase=True , _UpperCAmelCase=None , **_UpperCAmelCase , ) -> str:
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , tokenize_chinese_chars=_UpperCAmelCase , strip_accents=_UpperCAmelCase , **_UpperCAmelCase , )
__UpperCamelCase : List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , _UpperCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , _UpperCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , _UpperCAmelCase ) != tokenize_chinese_chars
):
__UpperCamelCase : List[Any] = getattr(_UpperCAmelCase , normalizer_state.pop("type" ) )
__UpperCamelCase : List[str] = do_lower_case
__UpperCamelCase : str = strip_accents
__UpperCamelCase : Dict = tokenize_chinese_chars
__UpperCamelCase : Union[str, Any] = normalizer_class(**_UpperCAmelCase )
__UpperCamelCase : Any = do_lower_case
def a_ (self , _UpperCAmelCase , _UpperCAmelCase=None ) -> int:
__UpperCamelCase : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None ) -> List[int]:
__UpperCamelCase : Optional[Any] = [self.sep_token_id]
__UpperCamelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None ) -> Tuple[str]:
__UpperCamelCase : str = self._tokenizer.model.save(_UpperCAmelCase , name=_UpperCAmelCase )
return tuple(_UpperCAmelCase )
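# Worked example of the two helpers above (token ids illustrative): for a pair of
# sequences the layout is [CLS] A [SEP] B [SEP], so with token ids [5, 6] and [7],
# build_inputs_with_special_tokens returns [cls, 5, 6, sep, 7, sep] and the token
# type ids come out as [0, 0, 0, 0, 1, 1]; a single sequence yields all zeros.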
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __lowerCAmelCase ( snake_case__=None ):
if subparsers is not None:
__UpperCamelCase : Any = subparsers.add_parser("test" )
else:
__UpperCamelCase : Dict = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" , default=snake_case__ , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=snake_case__ )
return parser
def __lowerCAmelCase ( snake_case__ ):
__UpperCamelCase : str = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
__UpperCamelCase : str = script_name
else:
__UpperCamelCase : Tuple = F"--config_file={args.config_file} {script_name}"
__UpperCamelCase : Optional[Any] = ["accelerate-launch"] + test_args.split()
__UpperCamelCase : Optional[Any] = execute_subprocess_async(snake_case__ , env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def __lowerCAmelCase ( ):
__UpperCamelCase : int = test_command_parser()
__UpperCamelCase : Union[str, Any] = parser.parse_args()
test_command(snake_case__ )
if __name__ == "__main__":
main()
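# Typical invocations of the command wired up above (flag name taken from the
# parser definition; paths illustrative):
#   accelerate test
#   accelerate test --config_file /path/to/default_config.yaml
# Either form expands to `accelerate-launch .../test_utils/scripts/test_script.py`
# via execute_subprocess_async.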
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class A ( unittest.TestCase ):
'''simple docstring'''
def a_ (self ) -> Any:
__UpperCamelCase : str = tempfile.mkdtemp()
# fmt: off
__UpperCamelCase : List[str] = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
__UpperCamelCase : Tuple = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
__UpperCamelCase : Optional[int] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
__UpperCamelCase : Optional[int] = {"unk_token": "<unk>"}
__UpperCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__UpperCamelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(_UpperCAmelCase ) )
__UpperCamelCase : str = {
"do_resize": True,
"size": 2_0,
"do_center_crop": True,
"crop_size": 1_8,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
__UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname , _UpperCAmelCase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
def a_ (self , **_UpperCAmelCase ) -> str:
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **_UpperCAmelCase )
def a_ (self , **_UpperCAmelCase ) -> Tuple:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" , **_UpperCAmelCase )
def a_ (self , **_UpperCAmelCase ) -> Optional[Any]:
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def a_ (self ) -> int:
shutil.rmtree(self.tmpdirname )
def a_ (self ) -> int:
__UpperCamelCase : str = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
__UpperCamelCase : Any = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a_ (self ) -> Optional[int]:
__UpperCamelCase : Optional[int] = self.get_tokenizer()
__UpperCamelCase : Dict = self.get_rust_tokenizer()
__UpperCamelCase : Tuple = self.get_image_processor()
__UpperCamelCase : List[Any] = OwlViTProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
__UpperCamelCase : int = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCAmelCase )
__UpperCamelCase : Tuple = OwlViTProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
__UpperCamelCase : Any = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , _UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor , _UpperCAmelCase )
def a_ (self ) -> int:
__UpperCamelCase : Any = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__UpperCamelCase : Optional[int] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__UpperCamelCase : Dict = self.get_image_processor(do_normalize=_UpperCAmelCase )
__UpperCamelCase : List[Any] = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=_UpperCAmelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def a_ (self ) -> Optional[Any]:
__UpperCamelCase : Optional[int] = self.get_image_processor()
__UpperCamelCase : List[str] = self.get_tokenizer()
__UpperCamelCase : Union[str, Any] = OwlViTProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__UpperCamelCase : Dict = self.prepare_image_inputs()
__UpperCamelCase : Optional[Any] = image_processor(_UpperCAmelCase , return_tensors="np" )
__UpperCamelCase : int = processor(images=_UpperCAmelCase , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def a_ (self ) -> str:
__UpperCamelCase : str = self.get_image_processor()
__UpperCamelCase : List[Any] = self.get_tokenizer()
__UpperCamelCase : Optional[Any] = OwlViTProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__UpperCamelCase : Optional[int] = "lower newer"
__UpperCamelCase : Dict = processor(text=_UpperCAmelCase , return_tensors="np" )
__UpperCamelCase : int = tokenizer(_UpperCAmelCase , return_tensors="np" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def a_ (self ) -> List[Any]:
__UpperCamelCase : Dict = self.get_image_processor()
__UpperCamelCase : Dict = self.get_tokenizer()
__UpperCamelCase : List[Any] = OwlViTProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__UpperCamelCase : Union[str, Any] = "lower newer"
__UpperCamelCase : Any = self.prepare_image_inputs()
__UpperCamelCase : List[Any] = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(_UpperCAmelCase ):
processor()
def a_ (self ) -> Optional[int]:
__UpperCamelCase : str = "google/owlvit-base-patch32"
__UpperCamelCase : Dict = OwlViTProcessor.from_pretrained(_UpperCAmelCase )
__UpperCamelCase : Optional[Any] = ["cat", "nasa badge"]
__UpperCamelCase : Optional[int] = processor(text=_UpperCAmelCase )
__UpperCamelCase : Dict = 1_6
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_UpperCAmelCase ):
processor()
def a_ (self ) -> Any:
__UpperCamelCase : int = "google/owlvit-base-patch32"
__UpperCamelCase : List[Any] = OwlViTProcessor.from_pretrained(_UpperCAmelCase )
__UpperCamelCase : List[Any] = [["cat", "nasa badge"], ["person"]]
__UpperCamelCase : Dict = processor(text=_UpperCAmelCase )
__UpperCamelCase : List[str] = 1_6
__UpperCamelCase : Optional[Any] = len(_UpperCAmelCase )
__UpperCamelCase : int = max([len(_UpperCAmelCase ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_UpperCAmelCase ):
processor()
def a_ (self ) -> int:
__UpperCamelCase : str = "google/owlvit-base-patch32"
__UpperCamelCase : Optional[Any] = OwlViTProcessor.from_pretrained(_UpperCAmelCase )
__UpperCamelCase : List[Any] = ["cat", "nasa badge"]
__UpperCamelCase : Optional[Any] = processor(text=_UpperCAmelCase )
__UpperCamelCase : Dict = 1_6
__UpperCamelCase : Union[str, Any] = inputs["input_ids"]
__UpperCamelCase : Tuple = [
[4_9_4_0_6, 2_3_6_8, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_9_4_0_6, 6_8_4_1, 1_1_3_0_1, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def a_ (self ) -> Any:
__UpperCamelCase : str = self.get_image_processor()
__UpperCamelCase : Dict = self.get_tokenizer()
__UpperCamelCase : Any = OwlViTProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__UpperCamelCase : str = self.prepare_image_inputs()
__UpperCamelCase : List[Any] = self.prepare_image_inputs()
__UpperCamelCase : Optional[int] = processor(images=_UpperCAmelCase , query_images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(_UpperCAmelCase ):
processor()
def a_ (self ) -> List[str]:
__UpperCamelCase : Optional[Any] = self.get_image_processor()
__UpperCamelCase : Tuple = self.get_tokenizer()
__UpperCamelCase : Any = OwlViTProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__UpperCamelCase : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__UpperCamelCase : Optional[int] = processor.batch_decode(_UpperCAmelCase )
__UpperCamelCase : List[str] = tokenizer.batch_decode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
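# Shape summary for the text-query tests above: a flat list of N queries yields
# input_ids of shape (N, 16), while a nested list of query lists is padded to the
# longest inner list, yielding (num_lists * max_queries, 16); e.g. the nested input
# [["cat", "nasa badge"], ["person"]] produces input_ids of shape (2 * 2, 16).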
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
A = BlenderbotSmallTokenizer
A = False
def a_ (self ) -> List[str]:
super().setUp()
__UpperCamelCase : Optional[Any] = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
__UpperCamelCase : int = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
__UpperCamelCase : Any = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
__UpperCamelCase : int = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
__UpperCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__UpperCamelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(_UpperCAmelCase ) )
def a_ (self , **_UpperCAmelCase ) -> Dict:
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def a_ (self , _UpperCAmelCase ) -> str:
__UpperCamelCase : List[Any] = "adapt act apte"
__UpperCamelCase : Dict = "adapt act apte"
return input_text, output_text
def a_ (self ) -> int:
__UpperCamelCase : List[str] = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__UpperCamelCase : str = "adapt act apte"
__UpperCamelCase : List[str] = ["adapt", "act", "ap@@", "te"]
__UpperCamelCase : Union[str, Any] = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__UpperCamelCase : Dict = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
__UpperCamelCase : Any = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
def a_ (self ) -> int:
__UpperCamelCase : Optional[int] = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
assert tok("sam" ).input_ids == [1_3_8_4]
__UpperCamelCase : Dict = "I am a small frog."
__UpperCamelCase : Any = tok([src_text] , padding=_UpperCAmelCase , truncation=_UpperCAmelCase )["input_ids"]
__UpperCamelCase : Optional[Any] = tok.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def a_ (self ) -> List[Any]:
__UpperCamelCase : Dict = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
__UpperCamelCase : Tuple = "I am a small frog ."
__UpperCamelCase : List[str] = "."
__UpperCamelCase : Any = tok(_UpperCAmelCase )["input_ids"]
__UpperCamelCase : Optional[Any] = tok(_UpperCAmelCase )["input_ids"]
assert encoded[-1] == encoded_dot[0]
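# How the tiny merge table in setUp tokenizes "adapt act apte": "adapt" merges fully
# via "a p" -> "ap t</w>" -> "a d" -> "ad apt</w>", and "act" via "a c" -> "ac t</w>".
# In "apte", after "a p" merges, the trailing "t" is not word-final, so "ap t</w>"
# never applies; only "t e</w>" fires, leaving "ap" + "te</w>", i.e. ["ap@@", "te"].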
'''simple docstring'''
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
def count_of_possible_combinations(snake_case__ ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(snake_case__ )
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
def count_of_possible_combinations_with_dp_array(
snake_case__ , snake_case__ ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
__UpperCamelCase : Any = sum(
count_of_possible_combinations_with_dp_array(target - item , snake_case__ )
for item in array )
__UpperCamelCase : List[str] = answer
return answer
__UpperCamelCase : Optional[int] = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(snake_case__ , snake_case__ )
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
__UpperCamelCase : Optional[int] = [0] * (target + 1)
__UpperCamelCase : Tuple = 1
for i in range(1 , target + 1 ):
for j in range(snake_case__ ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase = 3
_lowerCAmelCase = 5
_lowerCAmelCase = [1, 2, 5]
print(combination_sum_iv(n, array, target))
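# Worked example for array=[1, 2, 5] and target=5: the bottom-up table fills as
# dp = [1, 1, 2, 3, 5, 9], so the script above prints 9. Orderings count separately
# here: 1+2+2, 2+1+2 and 2+2+1 are three distinct combinations.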
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowerCAmelCase = logging.get_logger(__name__)
# General docstring
_lowerCAmelCase = '''RegNetConfig'''
# Base docstring
_lowerCAmelCase = '''facebook/regnet-y-040'''
_lowerCAmelCase = [1, 1088, 7, 7]
# Image classification docstring
_lowerCAmelCase = '''facebook/regnet-y-040'''
_lowerCAmelCase = '''tabby, tabby cat'''
_lowerCAmelCase = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase = 3 , _UpperCAmelCase = 1 , _UpperCAmelCase = 1 , _UpperCAmelCase = "relu" , **_UpperCAmelCase , ) -> Optional[int]:
super().__init__(**_UpperCAmelCase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
__UpperCamelCase : List[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
__UpperCamelCase : Tuple = tf.keras.layers.ConvaD(
filters=_UpperCAmelCase , kernel_size=_UpperCAmelCase , strides=_UpperCAmelCase , padding="VALID" , groups=_UpperCAmelCase , use_bias=_UpperCAmelCase , name="convolution" , )
__UpperCamelCase : int = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
__UpperCamelCase : List[str] = ACTaFN[activation] if activation is not None else tf.identity
def a_ (self , _UpperCAmelCase ) -> Dict:
__UpperCamelCase : str = self.convolution(self.padding(_UpperCAmelCase ) )
__UpperCamelCase : Dict = self.normalization(_UpperCAmelCase )
__UpperCamelCase : Dict = self.activation(_UpperCAmelCase )
return hidden_state
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , **_UpperCAmelCase ) -> Optional[Any]:
super().__init__(**_UpperCAmelCase )
__UpperCamelCase : Any = config.num_channels
__UpperCamelCase : str = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def a_ (self , _UpperCAmelCase ) -> Tuple:
__UpperCamelCase : Dict = shape_list(_UpperCAmelCase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
__UpperCamelCase : Any = tf.transpose(_UpperCAmelCase , perm=(0, 2, 3, 1) )
__UpperCamelCase : List[Any] = self.embedder(_UpperCAmelCase )
return hidden_state
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase = 2 , **_UpperCAmelCase ) -> Any:
super().__init__(**_UpperCAmelCase )
__UpperCamelCase : Any = tf.keras.layers.ConvaD(
filters=_UpperCAmelCase , kernel_size=1 , strides=_UpperCAmelCase , use_bias=_UpperCAmelCase , name="convolution" )
__UpperCamelCase : Tuple = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase = False ) -> tf.Tensor:
return self.normalization(self.convolution(_UpperCAmelCase ) , training=_UpperCAmelCase )
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) -> Any:
super().__init__(**_UpperCAmelCase )
__UpperCamelCase : List[str] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_UpperCAmelCase , name="pooler" )
__UpperCamelCase : Optional[Any] = [
tf.keras.layers.ConvaD(filters=_UpperCAmelCase , kernel_size=1 , activation="relu" , name="attention.0" ),
tf.keras.layers.ConvaD(filters=_UpperCAmelCase , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
]
def a_ (self , _UpperCAmelCase ) -> Tuple:
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
__UpperCamelCase : List[str] = self.pooler(_UpperCAmelCase )
for layer_module in self.attention:
__UpperCamelCase : str = layer_module(_UpperCAmelCase )
__UpperCamelCase : List[Any] = hidden_state * pooled
return hidden_state
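# Shape walkthrough of the squeeze-and-excitation block above (channels-last, as
# used throughout this TF port): (B, H, W, C) -> pool -> (B, 1, 1, C) -> 1x1 conv +
# relu -> (B, 1, 1, C_reduced) -> 1x1 conv + sigmoid -> (B, 1, 1, C), which then
# rescales the input channel-wise via broadcasting.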
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1 , **_UpperCAmelCase ) -> int:
super().__init__(**_UpperCAmelCase )
__UpperCamelCase : List[Any] = in_channels != out_channels or stride != 1
__UpperCamelCase : List[str] = max(1 , out_channels // config.groups_width )
__UpperCamelCase : List[Any] = (
TFRegNetShortCut(_UpperCAmelCase , stride=_UpperCAmelCase , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
__UpperCamelCase : Optional[Any] = [
TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
_UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , activation=config.hidden_act , name="layer.1" ),
TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase , name="layer.2" ),
]
__UpperCamelCase : Dict = ACTaFN[config.hidden_act]
def a_ (self , _UpperCAmelCase ) -> Union[str, Any]:
__UpperCamelCase : List[Any] = hidden_state
for layer_module in self.layers:
__UpperCamelCase : Dict = layer_module(_UpperCAmelCase )
__UpperCamelCase : List[Any] = self.shortcut(_UpperCAmelCase )
hidden_state += residual
__UpperCamelCase : Tuple = self.activation(_UpperCAmelCase )
return hidden_state
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1 , **_UpperCAmelCase ) -> Any:
super().__init__(**_UpperCAmelCase )
__UpperCamelCase : str = in_channels != out_channels or stride != 1
__UpperCamelCase : Optional[int] = max(1 , out_channels // config.groups_width )
__UpperCamelCase : Union[str, Any] = (
TFRegNetShortCut(_UpperCAmelCase , stride=_UpperCAmelCase , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
__UpperCamelCase : Union[str, Any] = [
TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
_UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(_UpperCAmelCase , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase , name="layer.3" ),
]
__UpperCamelCase : Union[str, Any] = ACTaFN[config.hidden_act]
def a_ (self , _UpperCAmelCase ) -> int:
__UpperCamelCase : str = hidden_state
for layer_module in self.layers:
__UpperCamelCase : Any = layer_module(_UpperCAmelCase )
__UpperCamelCase : Optional[Any] = self.shortcut(_UpperCAmelCase )
hidden_state += residual
__UpperCamelCase : Union[str, Any] = self.activation(_UpperCAmelCase )
return hidden_state
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , **_UpperCAmelCase ) -> int:
super().__init__(**_UpperCAmelCase )
__UpperCamelCase : List[str] = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
__UpperCamelCase : Tuple = [
# downsampling is done in the first layer with stride of 2
layer(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , name="layers.0" ),
*[layer(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , name=f"layers.{i+1}" ) for i in range(depth - 1 )],
]
def a_ (self , _UpperCAmelCase ) -> Any:
for layer_module in self.layers:
__UpperCamelCase : Dict = layer_module(_UpperCAmelCase )
return hidden_state
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , **_UpperCAmelCase ) -> str:
super().__init__(**_UpperCAmelCase )
__UpperCamelCase : Dict = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
_UpperCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
__UpperCamelCase : Union[str, Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(_UpperCAmelCase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , depth=_UpperCAmelCase , name=f"stages.{i+1}" ) )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase = False , _UpperCAmelCase = True ) -> TFBaseModelOutputWithNoAttention:
__UpperCamelCase : List[Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__UpperCamelCase : Any = hidden_states + (hidden_state,)
__UpperCamelCase : Any = stage_module(_UpperCAmelCase )
if output_hidden_states:
__UpperCamelCase : List[Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=_UpperCAmelCase , hidden_states=_UpperCAmelCase )
@keras_serializable
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
A = RegNetConfig
def __init__(self , _UpperCAmelCase , **_UpperCAmelCase ) -> List[Any]:
super().__init__(**_UpperCAmelCase )
__UpperCamelCase : Optional[int] = config
__UpperCamelCase : List[Any] = TFRegNetEmbeddings(_UpperCAmelCase , name="embedder" )
__UpperCamelCase : Union[str, Any] = TFRegNetEncoder(_UpperCAmelCase , name="encoder" )
__UpperCamelCase : Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_UpperCAmelCase , name="pooler" )
@unpack_inputs
def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
__UpperCamelCase : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCamelCase : Dict = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCamelCase : Union[str, Any] = self.embedder(_UpperCAmelCase , training=_UpperCAmelCase )
__UpperCamelCase : str = self.encoder(
_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase , training=_UpperCAmelCase )
__UpperCamelCase : List[str] = encoder_outputs[0]
__UpperCamelCase : Tuple = self.pooler(_UpperCAmelCase )
# Change to NCHW output format to have uniformity in the modules
__UpperCamelCase : List[str] = tf.transpose(_UpperCAmelCase , perm=(0, 3, 1, 2) )
__UpperCamelCase : List[Any] = tf.transpose(_UpperCAmelCase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
__UpperCamelCase : List[str] = tuple([tf.transpose(_UpperCAmelCase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_UpperCAmelCase , pooler_output=_UpperCAmelCase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A = RegNetConfig
A = "regnet"
A = "pixel_values"
@property
def a_ (self ) -> List[Any]:
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )}
_lowerCAmelCase = R'''
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''
_lowerCAmelCase = R'''
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) -> Tuple:
super().__init__(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase )
__UpperCamelCase : Optional[Any] = TFRegNetMainLayer(_UpperCAmelCase , name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
__UpperCamelCase : List[str] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCamelCase : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCamelCase : Tuple = self.regnet(
pixel_values=_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase , training=_UpperCAmelCase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) -> int:
super().__init__(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase )
__UpperCamelCase : Optional[Any] = config.num_labels
__UpperCamelCase : Any = TFRegNetMainLayer(_UpperCAmelCase , name="regnet" )
# classification head
__UpperCamelCase : List[str] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def a_ (self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
__UpperCamelCase : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCamelCase : str = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCamelCase : Dict = self.regnet(
_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase , training=_UpperCAmelCase )
__UpperCamelCase : Union[str, Any] = outputs.pooler_output if return_dict else outputs[1]
__UpperCamelCase : List[str] = self.classifier[0](_UpperCAmelCase )
__UpperCamelCase : Optional[int] = self.classifier[1](_UpperCAmelCase )
__UpperCamelCase : str = None if labels is None else self.hf_compute_loss(labels=_UpperCAmelCase , logits=_UpperCAmelCase )
if not return_dict:
__UpperCamelCase : Union[str, Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=_UpperCAmelCase , logits=_UpperCAmelCase , hidden_states=outputs.hidden_states )
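# Minimal inference sketch for the classification model above (these classes
# correspond to transformers' TFRegNetModel / TFRegNetForImageClassification;
# checkpoint name taken from the docstring constants, `image` is any PIL image):
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(images=image, return_tensors="tf")
#   predicted = int(tf.math.argmax(model(**inputs).logits, axis=-1)[0])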
'''simple docstring'''
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
_lowerCAmelCase = get_logger(__name__)
_lowerCAmelCase = Path(__file__).parent / '''model_card_template.md'''
_lowerCAmelCase = uuida().hex
_lowerCAmelCase = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES
_lowerCAmelCase = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES
_lowerCAmelCase = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/'''
def __lowerCAmelCase ( snake_case__ = None ):
__UpperCamelCase : Tuple = F"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += F"; torch/{_torch_version}"
if is_flax_available():
ua += F"; jax/{_jax_version}"
ua += F"; flax/{_flax_version}"
if is_onnx_available():
ua += F"; onnxruntime/{_onnxruntime_version}"
# CI will set this value to True
if os.environ.get("DIFFUSERS_IS_CI" , "" ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(snake_case__ , snake_case__ ):
ua += "; " + "; ".join(F"{k}/{v}" for k, v in user_agent.items() )
elif isinstance(snake_case__ , snake_case__ ):
ua += "; " + user_agent
return ua
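# Example of the assembled user-agent string (version numbers illustrative):
#   "diffusers/0.19.0; python/3.10.12; session_id/4a1b...; torch/2.0.1; is_ci/true"
# With telemetry disabled it is truncated after the session id:
#   "diffusers/0.19.0; python/3.10.12; session_id/4a1b...; telemetry/off"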
def __lowerCAmelCase ( snake_case__ , snake_case__ = None , snake_case__ = None ):
if token is None:
__UpperCamelCase : Union[str, Any] = HfFolder.get_token()
if organization is None:
__UpperCamelCase : List[Any] = whoami(snake_case__ )["name"]
return F"{username}/{model_id}"
else:
return F"{organization}/{model_id}"
def __lowerCAmelCase ( snake_case__ , snake_case__ ):
if not is_jinja_available():
raise ValueError(
"Modelcard rendering is based on Jinja templates."
" Please make sure to have `jinja` installed before using `create_model_card`."
" To install it, please run `pip install Jinja2`." )
if hasattr(snake_case__ , "local_rank" ) and args.local_rank not in [-1, 0]:
return
__UpperCamelCase : Tuple = args.hub_token if hasattr(snake_case__ , "hub_token" ) else None
__UpperCamelCase : str = get_full_repo_name(snake_case__ , token=snake_case__ )
__UpperCamelCase : Optional[Any] = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language="en" , license="apache-2.0" , library_name="diffusers" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=snake_case__ , model_name=snake_case__ , repo_name=snake_case__ , dataset_name=args.dataset_name if hasattr(snake_case__ , "dataset_name" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(snake_case__ , "gradient_accumulation_steps" ) else None
) , adam_betaa=args.adam_betaa if hasattr(snake_case__ , "adam_beta1" ) else None , adam_betaa=args.adam_betaa if hasattr(snake_case__ , "adam_beta2" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(snake_case__ , "adam_weight_decay" ) else None , adam_epsilon=args.adam_epsilon if hasattr(snake_case__ , "adam_epsilon" ) else None , lr_scheduler=args.lr_scheduler if hasattr(snake_case__ , "lr_scheduler" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(snake_case__ , "lr_warmup_steps" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(snake_case__ , "ema_inv_gamma" ) else None , ema_power=args.ema_power if hasattr(snake_case__ , "ema_power" ) else None , ema_max_decay=args.ema_max_decay if hasattr(snake_case__ , "ema_max_decay" ) else None , mixed_precision=args.mixed_precision , )
__UpperCamelCase : Optional[int] = os.path.join(args.output_dir , "README.md" )
model_card.save(snake_case__ )
def __lowerCAmelCase ( snake_case__ , snake_case__ = None ):
if resolved_file is None or commit_hash is not None:
return commit_hash
__UpperCamelCase : Tuple = str(Path(snake_case__ ).as_posix() )
__UpperCamelCase : Optional[int] = re.search(r"snapshots/([^/]+)/" , snake_case__ )
if search is None:
return None
__UpperCamelCase : List[Any] = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(snake_case__ ) else None
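# Example (path and hash illustrative): for a resolved file such as
#   ~/.cache/huggingface/diffusers/models--org--name/snapshots/0123abcd.../model.bin
# the regex captures the snapshot folder name and returns it if it looks like a
# full git commit hash (REGEX_COMMIT_HASH), otherwise None.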
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
_lowerCAmelCase = os.path.expanduser(
os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface'''))
)
_lowerCAmelCase = os.path.join(hf_cache_home, '''diffusers''')
def __lowerCAmelCase ( snake_case__ = None , snake_case__ = None ):
if new_cache_dir is None:
__UpperCamelCase : str = DIFFUSERS_CACHE
if old_cache_dir is None:
__UpperCamelCase : Any = old_diffusers_cache
__UpperCamelCase : Union[str, Any] = Path(snake_case__ ).expanduser()
__UpperCamelCase : Dict = Path(snake_case__ ).expanduser()
for old_blob_path in old_cache_dir.glob("**/blobs/*" ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
__UpperCamelCase : Tuple = new_cache_dir / old_blob_path.relative_to(snake_case__ )
new_blob_path.parent.mkdir(parents=snake_case__ , exist_ok=snake_case__ )
os.replace(snake_case__ , snake_case__ )
try:
os.symlink(snake_case__ , snake_case__ )
except OSError:
logger.warning(
"Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
_lowerCAmelCase = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''')
if not os.path.isfile(cache_version_file):
_lowerCAmelCase = 0
else:
with open(cache_version_file) as f:
try:
_lowerCAmelCase = int(f.read())
except ValueError:
_lowerCAmelCase = 0
if cache_version < 1:
_lowerCAmelCase = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
_lowerCAmelCase = '''\n'''.join(traceback.format_tb(e.__traceback__))
logger.error(
f'There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
f'There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '
'''the directory exists and can be written to.'''
)
def __lowerCAmelCase ( snake_case__ , snake_case__ = None ):
if variant is not None:
__UpperCamelCase : Union[str, Any] = weights_name.split("." )
__UpperCamelCase : Any = splits[:-1] + [variant] + splits[-1:]
__UpperCamelCase : Optional[int] = ".".join(snake_case__ )
return weights_name
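# Worked example of the variant helper above (called as `_add_variant` below):
# splitting on "." and re-joining inserts the variant before the extension, e.g.
# "diffusion_pytorch_model.bin" + "fp16" -> "diffusion_pytorch_model.fp16.bin";
# with variant=None the weights name is returned unchanged.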
def __lowerCAmelCase ( snake_case__ , *,
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=None , ):
__UpperCamelCase : Any = str(snake_case__ )
if os.path.isfile(snake_case__ ):
return pretrained_model_name_or_path
elif os.path.isdir(snake_case__ ):
if os.path.isfile(os.path.join(snake_case__ , snake_case__ ) ):
# Load from a PyTorch checkpoint
__UpperCamelCase : Dict = os.path.join(snake_case__ , snake_case__ )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(snake_case__ , snake_case__ , snake_case__ ) ):
__UpperCamelCase : Dict = os.path.join(snake_case__ , snake_case__ , snake_case__ )
return model_file
else:
raise EnvironmentError(
F"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}." )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(snake_case__ ).base_version ) >= version.parse("0.20.0" )
):
try:
__UpperCamelCase : Any = hf_hub_download(
snake_case__ , filename=_add_variant(snake_case__ , snake_case__ ) , cache_dir=snake_case__ , force_download=snake_case__ , proxies=snake_case__ , resume_download=snake_case__ , local_files_only=snake_case__ , use_auth_token=snake_case__ , user_agent=snake_case__ , subfolder=snake_case__ , revision=revision or commit_hash , )
warnings.warn(
F"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead." , snake_case__ , )
return model_file
except: # noqa: E722
warnings.warn(
F"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(snake_case__ , snake_case__ )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(snake_case__ , snake_case__ )}' so that the correct variant file can be added." , snake_case__ , )
try:
# 2. Load model file as usual
__UpperCamelCase : Any = hf_hub_download(
snake_case__ , filename=snake_case__ , cache_dir=snake_case__ , force_download=snake_case__ , proxies=snake_case__ , resume_download=snake_case__ , local_files_only=snake_case__ , use_auth_token=snake_case__ , user_agent=snake_case__ , subfolder=snake_case__ , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
"listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
"token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
"login`." )
except RevisionNotFoundError:
raise EnvironmentError(
F"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
"this model name. Check the model page at "
F"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." )
except EntryNotFoundError:
raise EnvironmentError(
F"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." )
except HTTPError as err:
raise EnvironmentError(
F"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" )
except ValueError:
raise EnvironmentError(
F"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
F" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
F" directory containing a file named {weights_name} or"
" \nCheckout your internet connection or see how to run the library in"
" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." )
except EnvironmentError:
raise EnvironmentError(
F"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
"'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
F"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
F"containing a file named {weights_name}" )
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def __lowerCAmelCase ( snake_case__ ):
__UpperCamelCase : Tuple = torch.exp(snake_case__ )
__UpperCamelCase : str = torch.sum(snake_case__ , dim=1 ) # sum of exp(x_i)
__UpperCamelCase : int = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i)
return torch.log(snake_case__ ) - B / A
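# This is the Shannon entropy of softmax(x) in closed form: with p_i = exp(x_i) / A,
# H = -sum_i p_i * log(p_i) = log(A) - B / A. Sanity check: a uniform row such as
# torch.zeros(1, 4) gives tensor([log(4)]), the maximum entropy over 4 classes.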
class A ( nn.Module ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase ) -> Union[str, Any]:
super().__init__()
__UpperCamelCase : Any = config.output_attentions
__UpperCamelCase : Dict = config.output_hidden_states
__UpperCamelCase : Union[str, Any] = nn.ModuleList([BertLayer(_UpperCAmelCase ) for _ in range(config.num_hidden_layers )] )
__UpperCamelCase : Tuple = nn.ModuleList([BertHighway(_UpperCAmelCase ) for _ in range(config.num_hidden_layers )] )
__UpperCamelCase : Optional[int] = [-1 for _ in range(config.num_hidden_layers )]
def a_ (self , _UpperCAmelCase ) -> int:
if (type(_UpperCAmelCase ) is float) or (type(_UpperCAmelCase ) is int):
for i in range(len(self.early_exit_entropy ) ):
__UpperCamelCase : str = x
else:
__UpperCamelCase : List[Any] = x
def a_ (self , _UpperCAmelCase ) -> str:
__UpperCamelCase : Tuple = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ) -> List[Any]:
__UpperCamelCase : Optional[Any] = ()
__UpperCamelCase : Tuple = ()
__UpperCamelCase : Dict = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
__UpperCamelCase : Tuple = all_hidden_states + (hidden_states,)
__UpperCamelCase : Optional[int] = layer_module(
_UpperCAmelCase , _UpperCAmelCase , head_mask[i] , _UpperCAmelCase , _UpperCAmelCase )
__UpperCamelCase : Tuple = layer_outputs[0]
if self.output_attentions:
__UpperCamelCase : Optional[Any] = all_attentions + (layer_outputs[1],)
__UpperCamelCase : Any = (hidden_states,)
if self.output_hidden_states:
__UpperCamelCase : Any = current_outputs + (all_hidden_states,)
if self.output_attentions:
__UpperCamelCase : int = current_outputs + (all_attentions,)
__UpperCamelCase : Optional[int] = self.highway[i](_UpperCAmelCase )
# logits, pooled_output
if not self.training:
__UpperCamelCase : Dict = highway_exit[0]
__UpperCamelCase : Any = entropy(_UpperCAmelCase )
__UpperCamelCase : str = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
__UpperCamelCase : Optional[Any] = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
__UpperCamelCase : str = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(_UpperCAmelCase , i + 1 )
else:
__UpperCamelCase : Optional[int] = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
__UpperCamelCase : int = all_hidden_states + (hidden_states,)
__UpperCamelCase : Dict = (hidden_states,)
if self.output_hidden_states:
__UpperCamelCase : Union[str, Any] = outputs + (all_hidden_states,)
if self.output_attentions:
__UpperCamelCase : Optional[int] = outputs + (all_attentions,)
__UpperCamelCase : List[Any] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
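# Early-exit summary: at inference time every intermediate layer's highway head
# produces logits; if the entropy of those logits falls below that layer's
# configured threshold, a HighwayException carries the prediction out of the
# forward pass, skipping all remaining transformer layers.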
@add_start_docstrings(
"The Bert Model transformer with early exiting (DeeBERT). " , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase ) -> Dict:
super().__init__(_UpperCAmelCase )
__UpperCamelCase : Union[str, Any] = config
__UpperCamelCase : Dict = BertEmbeddings(_UpperCAmelCase )
__UpperCamelCase : Optional[Any] = DeeBertEncoder(_UpperCAmelCase )
__UpperCamelCase : str = BertPooler(_UpperCAmelCase )
self.init_weights()
def a_ (self ) -> Any:
self.encoder.init_highway_pooler(self.pooler )
def a_ (self ) -> Optional[int]:
return self.embeddings.word_embeddings
def a_ (self , _UpperCAmelCase ) -> Dict:
__UpperCamelCase : int = value
def a_ (self , _UpperCAmelCase ) -> Tuple:
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(_UpperCAmelCase )
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
def a_ (self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ) -> Union[str, Any]:
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
elif input_ids is not None:
__UpperCamelCase : Tuple = input_ids.size()
elif inputs_embeds is not None:
__UpperCamelCase : Optional[int] = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds" )
__UpperCamelCase : List[str] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__UpperCamelCase : int = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
if encoder_attention_mask is None:
__UpperCamelCase : Tuple = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
if token_type_ids is None:
__UpperCamelCase : Optional[Any] = torch.zeros(_UpperCAmelCase , dtype=torch.long , device=_UpperCAmelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__UpperCamelCase : torch.Tensor = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
__UpperCamelCase : Tuple = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
__UpperCamelCase : Any = encoder_attention_mask[:, None, None, :]
__UpperCamelCase : List[Any] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
__UpperCamelCase : Dict = (1.0 - encoder_extended_attention_mask) * -10_000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__UpperCamelCase : Dict = self.get_head_mask(_UpperCAmelCase , self.config.num_hidden_layers )
__UpperCamelCase : Optional[int] = self.embeddings(
input_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase )
__UpperCamelCase : List[Any] = self.encoder(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
__UpperCamelCase : Union[str, Any] = encoder_outputs[0]
__UpperCamelCase : Any = self.pooler(_UpperCAmelCase )
__UpperCamelCase : Union[str, Any] = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]:
__UpperCamelCase : Tuple = message
__UpperCamelCase : Union[str, Any] = exit_layer # start from 1!
class A ( nn.Module ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase ) -> Dict:
super().__init__()
__UpperCamelCase : Union[str, Any] = BertPooler(_UpperCAmelCase )
__UpperCamelCase : int = nn.Dropout(config.hidden_dropout_prob )
__UpperCamelCase : Union[str, Any] = nn.Linear(config.hidden_size , config.num_labels )
def a_ (self , _UpperCAmelCase ) -> Any:
# Pooler
__UpperCamelCase : Optional[int] = encoder_outputs[0]
__UpperCamelCase : str = self.pooler(_UpperCAmelCase )
# "return" pooler_output
# BertModel
__UpperCamelCase : Tuple = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
__UpperCamelCase : Dict = bmodel_output[1]
__UpperCamelCase : List[Any] = self.dropout(_UpperCAmelCase )
__UpperCamelCase : Any = self.classifier(_UpperCAmelCase )
return logits, pooled_output
@add_start_docstrings(
"Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. " , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase ) -> Any:
super().__init__(_UpperCAmelCase )
__UpperCamelCase : List[Any] = config.num_labels
__UpperCamelCase : List[Any] = config.num_hidden_layers
__UpperCamelCase : Optional[int] = DeeBertModel(_UpperCAmelCase )
__UpperCamelCase : List[str] = nn.Dropout(config.hidden_dropout_prob )
__UpperCamelCase : str = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
def a_ (self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=-1 , _UpperCAmelCase=False , ) -> int:
__UpperCamelCase : int = self.num_layers
try:
__UpperCamelCase : Tuple = self.bert(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , head_mask=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
__UpperCamelCase : str = outputs[1]
__UpperCamelCase : List[Any] = self.dropout(_UpperCAmelCase )
__UpperCamelCase : Dict = self.classifier(_UpperCAmelCase )
__UpperCamelCase : Tuple = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__UpperCamelCase : int = e.message
__UpperCamelCase : Optional[Any] = e.exit_layer
__UpperCamelCase : Optional[int] = outputs[0]
if not self.training:
__UpperCamelCase : Optional[int] = entropy(_UpperCAmelCase )
__UpperCamelCase : Optional[Any] = []
__UpperCamelCase : Any = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__UpperCamelCase : List[str] = MSELoss()
__UpperCamelCase : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
__UpperCamelCase : Dict = CrossEntropyLoss()
__UpperCamelCase : Any = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
__UpperCamelCase : List[Any] = []
for highway_exit in outputs[-1]:
__UpperCamelCase : Union[str, Any] = highway_exit[0]
if not self.training:
highway_logits_all.append(_UpperCAmelCase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
__UpperCamelCase : Union[str, Any] = MSELoss()
__UpperCamelCase : str = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
__UpperCamelCase : Optional[Any] = CrossEntropyLoss()
__UpperCamelCase : List[str] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_UpperCAmelCase )
if train_highway:
__UpperCamelCase : int = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
__UpperCamelCase : Dict = (loss,) + outputs
if not self.training:
__UpperCamelCase : Optional[int] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__UpperCamelCase : int = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
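# --- Illustrative sketch, not part of the original file ----------------------
# DeeBERT's highway exits rely on a confidence measure over the intermediate
# classifier logits; a common choice is softmax entropy, compared against a
# hypothetical threshold at inference time:
#
#   import torch
#
#   def entropy_sketch(logits):
#       probs = torch.softmax(logits, dim=-1)
#       return -(probs * torch.log(probs + 1e-12)).sum(dim=-1)
#
#   logits = torch.randn(1, 3)              # hypothetical highway classifier output
#   if entropy_sketch(logits).item() < 0.5: # hypothetical exit threshold
#       raise HighwayException((logits,), 1)  # message, exit_layer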
| 298
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
A = StableDiffusionInstructPixaPixPipeline
A = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
A = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A = IMAGE_TO_IMAGE_IMAGE_PARAMS
A = IMAGE_TO_IMAGE_IMAGE_PARAMS
def a_ (self ) -> int:
torch.manual_seed(0 )
__UpperCamelCase : List[Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=8 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
__UpperCamelCase : List[str] = PNDMScheduler(skip_prk_steps=_UpperCAmelCase )
torch.manual_seed(0 )
__UpperCamelCase : Optional[int] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCamelCase : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
__UpperCamelCase : List[str] = CLIPTextModel(_UpperCAmelCase )
__UpperCamelCase : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__UpperCamelCase : Union[str, Any] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def a_ (self , _UpperCAmelCase , _UpperCAmelCase=0 ) -> Optional[Any]:
__UpperCamelCase : Optional[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
__UpperCamelCase : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__UpperCamelCase : Union[str, Any] = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert("RGB" )
if str(_UpperCAmelCase ).startswith("mps" ):
__UpperCamelCase : Dict = torch.manual_seed(_UpperCAmelCase )
else:
__UpperCamelCase : List[Any] = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
__UpperCamelCase : Tuple = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"image_guidance_scale": 1,
"output_type": "numpy",
}
return inputs
def a_ (self ) -> Any:
__UpperCamelCase : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase : Any = self.get_dummy_components()
__UpperCamelCase : Optional[int] = StableDiffusionInstructPixaPixPipeline(**_UpperCAmelCase )
__UpperCamelCase : Optional[int] = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__UpperCamelCase : List[str] = self.get_dummy_inputs(_UpperCAmelCase )
__UpperCamelCase : Any = sd_pipe(**_UpperCAmelCase ).images
__UpperCamelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
__UpperCamelCase : List[str] = np.array([0.7_526, 0.3_750, 0.4_547, 0.6_117, 0.5_866, 0.5_016, 0.4_327, 0.5_642, 0.4_815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def a_ (self ) -> Tuple:
__UpperCamelCase : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase : Union[str, Any] = self.get_dummy_components()
__UpperCamelCase : List[Any] = StableDiffusionInstructPixaPixPipeline(**_UpperCAmelCase )
__UpperCamelCase : str = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__UpperCamelCase : Tuple = self.get_dummy_inputs(_UpperCAmelCase )
__UpperCamelCase : Any = "french fries"
__UpperCamelCase : List[Any] = sd_pipe(**_UpperCAmelCase , negative_prompt=_UpperCAmelCase )
__UpperCamelCase : List[Any] = output.images
__UpperCamelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
__UpperCamelCase : List[str] = np.array([0.7_511, 0.3_642, 0.4_553, 0.6_236, 0.5_797, 0.5_013, 0.4_343, 0.5_611, 0.4_831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def a_ (self ) -> Optional[Any]:
__UpperCamelCase : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase : str = self.get_dummy_components()
__UpperCamelCase : Any = StableDiffusionInstructPixaPixPipeline(**_UpperCAmelCase )
__UpperCamelCase : Any = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__UpperCamelCase : Any = self.get_dummy_inputs(_UpperCAmelCase )
__UpperCamelCase : Tuple = [inputs["prompt"]] * 2
__UpperCamelCase : int = np.array(inputs["image"] ).astype(np.floataa ) / 255.0
__UpperCamelCase : Optional[Any] = torch.from_numpy(_UpperCAmelCase ).unsqueeze(0 ).to(_UpperCAmelCase )
__UpperCamelCase : List[str] = image / 2 + 0.5
__UpperCamelCase : Optional[Any] = image.permute(0 , 3 , 1 , 2 )
__UpperCamelCase : Dict = image.repeat(2 , 1 , 1 , 1 )
__UpperCamelCase : Optional[Any] = sd_pipe(**_UpperCAmelCase ).images
__UpperCamelCase : List[Any] = image[-1, -3:, -3:, -1]
assert image.shape == (2, 3_2, 3_2, 3)
__UpperCamelCase : int = np.array([0.5_812, 0.5_748, 0.5_222, 0.5_908, 0.5_695, 0.7_174, 0.6_804, 0.5_523, 0.5_579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def a_ (self ) -> int:
__UpperCamelCase : Optional[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase : str = self.get_dummy_components()
__UpperCamelCase : int = EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" )
__UpperCamelCase : str = StableDiffusionInstructPixaPixPipeline(**_UpperCAmelCase )
__UpperCamelCase : Union[str, Any] = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__UpperCamelCase : Optional[int] = self.get_dummy_inputs(_UpperCAmelCase )
__UpperCamelCase : List[str] = sd_pipe(**_UpperCAmelCase ).images
__UpperCamelCase : List[str] = image[0, -3:, -3:, -1]
        __UpperCamelCase : List[Any] = [round(x , 4 ) for x in image_slice.flatten().tolist()]
        print(",".join([str(x ) for x in slice] ) )
assert image.shape == (1, 3_2, 3_2, 3)
__UpperCamelCase : List[Any] = np.array([0.7_417, 0.3_842, 0.4_732, 0.5_776, 0.5_891, 0.5_139, 0.4_052, 0.5_673, 0.4_986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def a_ (self ) -> int:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def a_ (self ) -> Dict:
__UpperCamelCase : Dict = self.get_dummy_components()
__UpperCamelCase : int = StableDiffusionInstructPixaPixPipeline(**_UpperCAmelCase )
__UpperCamelCase : List[Any] = VaeImageProcessor(do_resize=_UpperCAmelCase , do_normalize=_UpperCAmelCase )
__UpperCamelCase : int = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__UpperCamelCase : List[str] = pipe(**self.get_dummy_inputs_by_type(_UpperCAmelCase , input_image_type="pt" ) )[0]
__UpperCamelCase : Dict = components["vae"]
__UpperCamelCase : List[Any] = self.get_dummy_inputs_by_type(_UpperCAmelCase , input_image_type="pt" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
__UpperCamelCase : List[Any] = vae.encode(inputs[image_param] ).latent_dist.mode()
__UpperCamelCase : Optional[Any] = pipe(**_UpperCAmelCase )[0]
__UpperCamelCase : Union[str, Any] = np.abs(out - out_latents_inputs ).max()
self.assertLess(_UpperCAmelCase , 1E-4 , "passing latents as image input generate different result from passing image" )
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
'''simple docstring'''
def a_ (self ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ (self , _UpperCAmelCase=0 ) -> List[str]:
__UpperCamelCase : Any = torch.manual_seed(_UpperCAmelCase )
__UpperCamelCase : Union[str, Any] = load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" )
__UpperCamelCase : Dict = {
"prompt": "turn him into a cyborg",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"image_guidance_scale": 1.0,
"output_type": "numpy",
}
return inputs
def a_ (self ) -> List[str]:
__UpperCamelCase : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
__UpperCamelCase : str = self.get_inputs()
__UpperCamelCase : Optional[Any] = pipe(**_UpperCAmelCase ).images
__UpperCamelCase : List[str] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__UpperCamelCase : Union[str, Any] = np.array([0.5_902, 0.6_015, 0.6_027, 0.5_983, 0.6_092, 0.6_061, 0.5_765, 0.5_785, 0.5_555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def a_ (self ) -> Optional[Any]:
__UpperCamelCase : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=_UpperCAmelCase )
__UpperCamelCase : str = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
__UpperCamelCase : Any = self.get_inputs()
__UpperCamelCase : Optional[int] = pipe(**_UpperCAmelCase ).images
__UpperCamelCase : Any = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__UpperCamelCase : Tuple = np.array([0.6_578, 0.6_817, 0.6_972, 0.6_761, 0.6_856, 0.6_916, 0.6_428, 0.6_516, 0.6_301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def a_ (self ) -> List[str]:
__UpperCamelCase : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=_UpperCAmelCase )
__UpperCamelCase : Optional[Any] = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
__UpperCamelCase : Optional[Any] = self.get_inputs()
__UpperCamelCase : str = pipe(**_UpperCAmelCase ).images
__UpperCamelCase : List[str] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__UpperCamelCase : int = np.array([0.3_828, 0.3_834, 0.3_818, 0.3_792, 0.3_865, 0.3_752, 0.3_792, 0.3_847, 0.3_753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def a_ (self ) -> Optional[Any]:
__UpperCamelCase : str = 0
        def callback_fn(step , timestep , latents ) -> None:
__UpperCamelCase : Optional[Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__UpperCamelCase : Optional[int] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
__UpperCamelCase : int = latents[0, -3:, -3:, -1]
__UpperCamelCase : Optional[Any] = np.array([-0.2_463, -0.4_644, -0.9_756, 1.5_176, 1.4_414, 0.7_866, 0.9_897, 0.8_521, 0.7_983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
__UpperCamelCase : Dict = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
__UpperCamelCase : Any = latents[0, -3:, -3:, -1]
__UpperCamelCase : Dict = np.array([-0.2_644, -0.4_626, -0.9_653, 1.5_176, 1.4_551, 0.7_686, 0.9_805, 0.8_452, 0.8_115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
__UpperCamelCase : Union[str, Any] = False
__UpperCamelCase : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=_UpperCAmelCase , torch_dtype=torch.floataa )
__UpperCamelCase : Optional[int] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
__UpperCamelCase : Tuple = self.get_inputs()
pipe(**_UpperCAmelCase , callback=_UpperCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def a_ (self ) -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__UpperCamelCase : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=_UpperCAmelCase , torch_dtype=torch.floataa )
__UpperCamelCase : Any = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__UpperCamelCase : List[str] = self.get_inputs()
__UpperCamelCase : Optional[int] = pipe(**_UpperCAmelCase )
__UpperCamelCase : Any = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 1_0**9
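        # Note (illustrative, not from the original test): enable_sequential_cpu_offload()
        # keeps weights on the CPU and moves one submodule at a time onto the GPU
        # during the forward pass, which is why peak allocated memory stays under
        # the 2.2 GB bound asserted above.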
def a_ (self ) -> int:
__UpperCamelCase : List[str] = self.get_inputs()
        # resize to a resolution that is divisible by 8 but not by 16 or 32
__UpperCamelCase : Dict = inputs["image"].resize((5_0_4, 5_0_4) )
__UpperCamelCase : Optional[int] = "timbrooks/instruct-pix2pix"
__UpperCamelCase : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
_UpperCAmelCase , safety_checker=_UpperCAmelCase , )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
__UpperCamelCase : Union[str, Any] = pipe(**_UpperCAmelCase )
__UpperCamelCase : int = output.images[0]
__UpperCamelCase : Union[str, Any] = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 5_0_4, 3)
__UpperCamelCase : Optional[int] = np.array([0.2_726, 0.2_529, 0.2_664, 0.2_655, 0.2_641, 0.2_642, 0.2_591, 0.2_649, 0.2_590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
| 298
|
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
_lowerCAmelCase = HUGGINGFACE_HUB_CACHE
_lowerCAmelCase = '''config.json'''
_lowerCAmelCase = '''diffusion_pytorch_model.bin'''
_lowerCAmelCase = '''diffusion_flax_model.msgpack'''
_lowerCAmelCase = '''model.onnx'''
_lowerCAmelCase = '''diffusion_pytorch_model.safetensors'''
_lowerCAmelCase = '''weights.pb'''
_lowerCAmelCase = '''https://huggingface.co'''
_lowerCAmelCase = default_cache_path
_lowerCAmelCase = '''diffusers_modules'''
_lowerCAmelCase = os.getenv('''HF_MODULES_CACHE''', os.path.join(hf_cache_home, '''modules'''))
_lowerCAmelCase = ['''fp16''', '''non-ema''']
_lowerCAmelCase = '''.self_attn'''
| 298
| 1
|
'''simple docstring'''
import qiskit
def half_adder(bita, bitb):
    simulator = qiskit.Aer.get_backend("aer_simulator")
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bita == 1:
        qc_ha.x(0)
    if bitb == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit 2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit 3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1_000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
if __name__ == "__main__":
    counts = half_adder(1, 1)
print(f'Half Adder Output Qubit Counts: {counts}')
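# Classical cross-check (illustrative sketch, not part of the original file):
# the circuit implements sum = a XOR b on qubit 2 and carry = a AND b on
# qubit 3, so the measured bitstrings should follow this truth table:
#
#   for a in (0, 1):
#       for b in (0, 1):
#           print(f"{a} + {b} -> sum={a ^ b}, carry={a & b}")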
| 298
|
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class A :
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase=1_3 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=9_9 , _UpperCAmelCase=3_2 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=3_7 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=5_1_2 , _UpperCAmelCase=1_6 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ) -> Dict:
__UpperCamelCase : Optional[Any] = parent
__UpperCamelCase : List[str] = 1_3
__UpperCamelCase : List[Any] = 7
__UpperCamelCase : List[str] = True
__UpperCamelCase : Optional[Any] = True
__UpperCamelCase : Tuple = True
__UpperCamelCase : str = True
__UpperCamelCase : List[Any] = 9_9
__UpperCamelCase : Union[str, Any] = 3_8_4
__UpperCamelCase : str = 2
__UpperCamelCase : Optional[Any] = 4
__UpperCamelCase : Any = 3_7
__UpperCamelCase : str = "gelu"
__UpperCamelCase : Optional[Any] = 0.1
__UpperCamelCase : str = 0.1
__UpperCamelCase : str = 5_1_2
__UpperCamelCase : Optional[Any] = 1_6
__UpperCamelCase : Dict = 2
__UpperCamelCase : Optional[int] = 0.02
__UpperCamelCase : List[Any] = 3
__UpperCamelCase : Optional[Any] = 4
__UpperCamelCase : int = 1_2_8
__UpperCamelCase : Tuple = 2
__UpperCamelCase : str = 9
__UpperCamelCase : List[Any] = 1
__UpperCamelCase : Any = None
def a_ (self ) -> int:
__UpperCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase : str = None
if self.use_input_mask:
__UpperCamelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase : int = None
if self.use_token_type_ids:
__UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase : List[Any] = None
__UpperCamelCase : Union[str, Any] = None
__UpperCamelCase : Optional[Any] = None
if self.use_labels:
__UpperCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase : Tuple = ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase : str = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Dict:
__UpperCamelCase : Tuple = TFConvBertModel(config=_UpperCAmelCase )
__UpperCamelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
__UpperCamelCase : Optional[Any] = [input_ids, input_mask]
__UpperCamelCase : str = model(_UpperCAmelCase )
__UpperCamelCase : int = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
__UpperCamelCase : int = TFConvBertForMaskedLM(config=_UpperCAmelCase )
__UpperCamelCase : Dict = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
__UpperCamelCase : List[str] = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
__UpperCamelCase : Union[str, Any] = self.num_labels
__UpperCamelCase : Optional[Any] = TFConvBertForSequenceClassification(config=_UpperCAmelCase )
__UpperCamelCase : List[str] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
__UpperCamelCase : Optional[Any] = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
__UpperCamelCase : Optional[int] = self.num_choices
__UpperCamelCase : List[Any] = TFConvBertForMultipleChoice(config=_UpperCAmelCase )
__UpperCamelCase : Optional[int] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase : Optional[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase : str = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase : List[str] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
__UpperCamelCase : int = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any:
__UpperCamelCase : List[str] = self.num_labels
__UpperCamelCase : Tuple = TFConvBertForTokenClassification(config=_UpperCAmelCase )
__UpperCamelCase : Dict = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
__UpperCamelCase : Union[str, Any] = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
__UpperCamelCase : int = TFConvBertForQuestionAnswering(config=_UpperCAmelCase )
__UpperCamelCase : Dict = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
__UpperCamelCase : Any = model(_UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a_ (self ) -> str:
__UpperCamelCase : str = self.prepare_config_and_inputs()
(
(
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) ,
) : Any = config_and_inputs
__UpperCamelCase : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
A = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
A = (
{
"feature-extraction": TFConvBertModel,
"fill-mask": TFConvBertForMaskedLM,
"question-answering": TFConvBertForQuestionAnswering,
"text-classification": TFConvBertForSequenceClassification,
"token-classification": TFConvBertForTokenClassification,
"zero-shot": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A = False
A = False
A = False
def a_ (self ) -> Optional[int]:
__UpperCamelCase : Tuple = TFConvBertModelTester(self )
__UpperCamelCase : Optional[Any] = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=3_7 )
def a_ (self ) -> Dict:
self.config_tester.run_common_tests()
def a_ (self ) -> Dict:
__UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def a_ (self ) -> Tuple:
__UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def a_ (self ) -> Tuple:
__UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )
def a_ (self ) -> Dict:
__UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def a_ (self ) -> Dict:
__UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def a_ (self ) -> Optional[int]:
__UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )
@slow
def a_ (self ) -> Any:
__UpperCamelCase , __UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase : str = True
__UpperCamelCase : int = True
if hasattr(_UpperCAmelCase , "use_cache" ):
__UpperCamelCase : List[Any] = True
__UpperCamelCase : List[str] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
__UpperCamelCase : Optional[Any] = getattr(self.model_tester , "key_length" , _UpperCAmelCase )
for model_class in self.all_model_classes:
__UpperCamelCase : Any = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
__UpperCamelCase : int = model_class(_UpperCAmelCase )
__UpperCamelCase : Any = len(model(_UpperCAmelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase )
__UpperCamelCase : List[str] = os.path.join(_UpperCAmelCase , "saved_model" , "1" )
__UpperCamelCase : List[str] = tf.keras.models.load_model(_UpperCAmelCase )
__UpperCamelCase : Dict = model(_UpperCAmelCase )
if self.is_encoder_decoder:
__UpperCamelCase : Any = outputs["encoder_hidden_states"]
__UpperCamelCase : Tuple = outputs["encoder_attentions"]
else:
__UpperCamelCase : Tuple = outputs["hidden_states"]
__UpperCamelCase : Optional[int] = outputs["attentions"]
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
__UpperCamelCase : Any = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
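        # Note (illustrative): ConvBERT replaces half of its self-attention heads
        # with span-based dynamic convolution (head_ratio = 2 by default), which
        # is why the expected attention shape above uses num_attention_heads / 2.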
@slow
def a_ (self ) -> Optional[Any]:
__UpperCamelCase : Tuple = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(_UpperCAmelCase )
def a_ (self ) -> Tuple:
__UpperCamelCase , __UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase : str = True
__UpperCamelCase : Tuple = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
__UpperCamelCase : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
__UpperCamelCase : Any = getattr(self.model_tester , "key_length" , _UpperCAmelCase )
__UpperCamelCase : List[Any] = getattr(self.model_tester , "key_length" , _UpperCAmelCase )
def check_decoder_attentions_output(_UpperCAmelCase ):
__UpperCamelCase : Dict = len(_UpperCAmelCase )
self.assertEqual(out_len % 2 , 0 )
__UpperCamelCase : List[str] = outputs.decoder_attentions
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_UpperCAmelCase ):
__UpperCamelCase : Any = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__UpperCamelCase : Any = True
__UpperCamelCase : Dict = False
__UpperCamelCase : str = model_class(_UpperCAmelCase )
__UpperCamelCase : Tuple = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__UpperCamelCase : List[Any] = len(_UpperCAmelCase )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
if self.is_encoder_decoder:
__UpperCamelCase : str = model_class(_UpperCAmelCase )
__UpperCamelCase : Dict = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_decoder_attentions_output(_UpperCAmelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__UpperCamelCase : Optional[Any] = True
__UpperCamelCase : Tuple = model_class(_UpperCAmelCase )
__UpperCamelCase : int = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
# Check attention is always last and order is fine
__UpperCamelCase : int = True
__UpperCamelCase : str = True
__UpperCamelCase : Optional[Any] = model_class(_UpperCAmelCase )
__UpperCamelCase : Optional[int] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase ) )
self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
@require_tf
class A ( unittest.TestCase ):
'''simple docstring'''
@slow
def a_ (self ) -> str:
__UpperCamelCase : Dict = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
__UpperCamelCase : str = tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCamelCase : Optional[int] = model(_UpperCAmelCase )[0]
__UpperCamelCase : Tuple = [1, 6, 7_6_8]
self.assertEqual(output.shape , _UpperCAmelCase )
__UpperCamelCase : Any = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1E-4 )
| 298
| 1
|
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class A :
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase=9_9 , _UpperCAmelCase=1_3 , _UpperCAmelCase=1_6 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=2 , _UpperCAmelCase=3_2 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase=3_0 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=None , ) -> int:
__UpperCamelCase : List[str] = parent
__UpperCamelCase : str = batch_size
__UpperCamelCase : str = decoder_seq_length
# For common tests
__UpperCamelCase : Optional[int] = self.decoder_seq_length
__UpperCamelCase : Any = is_training
__UpperCamelCase : Tuple = use_attention_mask
__UpperCamelCase : Optional[int] = use_labels
__UpperCamelCase : Dict = vocab_size
__UpperCamelCase : Optional[int] = d_model
__UpperCamelCase : Union[str, Any] = d_model
__UpperCamelCase : int = decoder_layers
__UpperCamelCase : Dict = decoder_layers
__UpperCamelCase : str = decoder_ffn_dim
__UpperCamelCase : Optional[Any] = decoder_attention_heads
__UpperCamelCase : Optional[Any] = decoder_attention_heads
__UpperCamelCase : List[Any] = eos_token_id
__UpperCamelCase : int = bos_token_id
__UpperCamelCase : Tuple = pad_token_id
__UpperCamelCase : Tuple = decoder_start_token_id
__UpperCamelCase : Dict = use_cache
__UpperCamelCase : Optional[Any] = max_position_embeddings
__UpperCamelCase : int = None
__UpperCamelCase : Optional[int] = decoder_seq_length
__UpperCamelCase : Optional[int] = 2
__UpperCamelCase : Optional[int] = 1
def a_ (self ) -> List[Any]:
__UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
__UpperCamelCase : int = None
if self.use_attention_mask:
__UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
__UpperCamelCase : List[str] = None
if self.use_labels:
__UpperCamelCase : int = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
__UpperCamelCase : Optional[Any] = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> Optional[Any]:
__UpperCamelCase : List[Any] = True
__UpperCamelCase : Optional[Any] = TrOCRDecoder(config=_UpperCAmelCase ).to(_UpperCAmelCase ).eval()
__UpperCamelCase : Optional[Any] = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
__UpperCamelCase : str = model(_UpperCAmelCase , use_cache=_UpperCAmelCase )
__UpperCamelCase : List[Any] = model(_UpperCAmelCase )
__UpperCamelCase : Optional[int] = model(_UpperCAmelCase , use_cache=_UpperCAmelCase )
self.parent.assertTrue(len(_UpperCAmelCase ) == len(_UpperCAmelCase ) )
self.parent.assertTrue(len(_UpperCAmelCase ) == len(_UpperCAmelCase ) + 1 )
__UpperCamelCase : List[Any] = outputs["past_key_values"]
        # create hypothetical next token and extend to next_input_ids
__UpperCamelCase : Optional[int] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append to next input_ids
__UpperCamelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 )
__UpperCamelCase : Tuple = model(_UpperCAmelCase )["last_hidden_state"]
__UpperCamelCase : Any = model(_UpperCAmelCase , past_key_values=_UpperCAmelCase )["last_hidden_state"]
# select random slice
__UpperCamelCase : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__UpperCamelCase : Dict = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
__UpperCamelCase : Optional[int] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 )
def a_ (self ) -> Optional[Any]:
__UpperCamelCase : List[str] = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Any = config_and_inputs
__UpperCamelCase : str = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
A = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
A = (TrOCRForCausalLM,) if is_torch_available() else ()
A = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
A = True
A = False
def a_ (self ) -> List[str]:
__UpperCamelCase : Optional[int] = TrOCRStandaloneDecoderModelTester(self , is_training=_UpperCAmelCase )
__UpperCamelCase : Dict = ConfigTester(self , config_class=_UpperCAmelCase )
def a_ (self ) -> Dict:
pass
def a_ (self ) -> Optional[int]:
pass
def a_ (self ) -> Optional[Any]:
pass
def a_ (self ) -> Dict:
self.config_tester.run_common_tests()
def a_ (self ) -> List[Any]:
__UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*_UpperCAmelCase )
def a_ (self ) -> Any:
return
@unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :)
def a_ (self ) -> Tuple:
pass
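# Illustrative sketch (not part of the original file): the past_key_values test
# above checks that decoding one new token with a cache reproduces the last
# position of a full forward pass. In outline, with torch and a causal decoder:
#
#   past = model(ids, use_cache=True)["past_key_values"]
#   full = model(torch.cat([ids, nxt], dim=-1))["last_hidden_state"]
#   step = model(nxt, past_key_values=past)["last_hidden_state"]
#   assert torch.allclose(full[:, -1], step[:, 0], atol=1e-3)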
| 298
|
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger()
@dataclass
class A :
'''simple docstring'''
A = 42
A = field(default_factory=SCREAMING_SNAKE_CASE__ )
A = field(default_factory=SCREAMING_SNAKE_CASE__ )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
__UpperCamelCase : str = len(list(m.modules() ) ) == 1 or isinstance(_UpperCAmelCase , nn.Convad ) or isinstance(_UpperCAmelCase , nn.BatchNormad )
if has_not_submodules:
self.traced.append(_UpperCAmelCase )
def __call__(self , _UpperCAmelCase ) -> Optional[int]:
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(_UpperCAmelCase )
[x.remove() for x in self.handles]
return self
@property
def a_ (self ) -> Tuple:
# check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class A :
'''simple docstring'''
A = 42
A = 42
A = 0
A = field(default_factory=SCREAMING_SNAKE_CASE__ )
A = field(default_factory=SCREAMING_SNAKE_CASE__ )
def __call__(self , _UpperCAmelCase ) -> Any:
__UpperCamelCase : List[str] = Tracker(self.dest )(_UpperCAmelCase ).parametrized
__UpperCamelCase : List[Any] = Tracker(self.src )(_UpperCAmelCase ).parametrized
__UpperCamelCase : Optional[int] = list(filter(lambda _UpperCAmelCase : type(_UpperCAmelCase ) not in self.src_skip , _UpperCAmelCase ) )
__UpperCamelCase : List[Any] = list(filter(lambda _UpperCAmelCase : type(_UpperCAmelCase ) not in self.dest_skip , _UpperCAmelCase ) )
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise Exception(
f"Numbers of operations are different. Source module has {len(_UpperCAmelCase )} operations while"
f" destination module has {len(_UpperCAmelCase )}." )
for dest_m, src_m in zip(_UpperCAmelCase , _UpperCAmelCase ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f"Transfered from={src_m} to={dest_m}" )
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = True ):
print(F"Converting {name}..." )
with torch.no_grad():
__UpperCamelCase : int = timm.create_model(snake_case__ , pretrained=snake_case__ ).eval()
__UpperCamelCase : Union[str, Any] = ResNetForImageClassification(snake_case__ ).eval()
__UpperCamelCase : Tuple = ModuleTransfer(src=snake_case__ , dest=snake_case__ )
__UpperCamelCase : List[Any] = torch.randn((1, 3, 224, 224) )
module_transfer(snake_case__ )
assert torch.allclose(from_model(snake_case__ ) , our_model(snake_case__ ).logits ), "The model logits don't match the original one."
__UpperCamelCase : Any = F"resnet{'-'.join(name.split('resnet' ) )}"
print(snake_case__ )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="Add model" , use_temp_dir=snake_case__ , )
# we can use the convnext one
__UpperCamelCase : Union[str, Any] = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="Add image processor" , use_temp_dir=snake_case__ , )
print(F"Pushed {checkpoint_name}" )
def __lowerCAmelCase ( snake_case__ , snake_case__ = None , snake_case__ = True ):
__UpperCamelCase : str = "imagenet-1k-id2label.json"
__UpperCamelCase : Any = 1_000
__UpperCamelCase : List[str] = (1, num_labels)
__UpperCamelCase : List[str] = "huggingface/label-files"
__UpperCamelCase : str = num_labels
__UpperCamelCase : str = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="dataset" ) , "r" ) )
__UpperCamelCase : List[str] = {int(snake_case__ ): v for k, v in idalabel.items()}
__UpperCamelCase : Any = idalabel
__UpperCamelCase : Optional[int] = {v: k for k, v in idalabel.items()}
__UpperCamelCase : Tuple = partial(snake_case__ , num_labels=snake_case__ , idalabel=snake_case__ , labelaid=snake_case__ )
__UpperCamelCase : Dict = {
"resnet18": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
"resnet26": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
"resnet34": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
"resnet50": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
"resnet101": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
"resnet152": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
}
if model_name:
convert_weight_and_push(snake_case__ , names_to_config[model_name] , snake_case__ , snake_case__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
return config, expected_shape
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
            ''' currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
_lowerCAmelCase = parser.parse_args()
_lowerCAmelCase = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
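# Illustrative sketch (not part of the original script): Tracker collects leaf
# modules in execution order via forward hooks, and ModuleTransfer zips the two
# traces to copy weights. The hook mechanism in miniature:
#
#   import torch, torch.nn as nn
#   traced = []
#   net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
#   handles = [m.register_forward_hook(lambda mod, inp, out: traced.append(mod))
#              for m in net.modules() if len(list(m.children())) == 0]
#   net(torch.randn(1, 3, 8, 8))
#   [h.remove() for h in handles]   # traced now holds [Conv2d, BatchNorm2d]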
| 298
| 1
|
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
_lowerCAmelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def __lowerCAmelCase ( snake_case__ , snake_case__ ):
if "xprophetnet" in prophetnet_checkpoint_path:
__UpperCamelCase : Dict = XLMProphetNetForConditionalGenerationOld.from_pretrained(snake_case__ )
__UpperCamelCase , __UpperCamelCase : Union[str, Any] = XLMProphetNetForConditionalGeneration.from_pretrained(
snake_case__ , output_loading_info=snake_case__ )
else:
__UpperCamelCase : Union[str, Any] = ProphetNetForConditionalGenerationOld.from_pretrained(snake_case__ )
__UpperCamelCase , __UpperCamelCase : List[str] = ProphetNetForConditionalGeneration.from_pretrained(
snake_case__ , output_loading_info=snake_case__ )
__UpperCamelCase : Optional[int] = ["key_proj", "value_proj", "query_proj"]
__UpperCamelCase : Union[str, Any] = {
"self_attn": "ngram_self_attn",
"cross_attn": "encoder_attn",
"cross_attn_layer_norm": "encoder_attn_layer_norm",
"feed_forward_layer_norm": "final_layer_norm",
"feed_forward": "",
"intermediate": "fc1",
"output": "fc2",
"key_proj": "k_proj",
"query_proj": "q_proj",
"value_proj": "v_proj",
"word_embeddings": "embed_tokens",
"embeddings_layer_norm": "emb_layer_norm",
"relative_pos_embeddings": "relative_linear",
"ngram_embeddings": "ngram_input_embed",
"position_embeddings": "embed_positions",
}
for key in loading_info["missing_keys"]:
__UpperCamelCase : int = key.split("." )
if attributes[0] == "lm_head":
__UpperCamelCase : int = prophet
__UpperCamelCase : List[Any] = prophet_old
else:
__UpperCamelCase : Dict = prophet.prophetnet
__UpperCamelCase : int = prophet_old.model
__UpperCamelCase : Tuple = False
for attribute in attributes:
if attribute in mapping:
__UpperCamelCase : Dict = mapping[attribute]
if not hasattr(snake_case__ , snake_case__ ) and len(snake_case__ ) > 0:
__UpperCamelCase : Optional[Any] = attribute
elif hasattr(snake_case__ , snake_case__ ):
__UpperCamelCase : Dict = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
__UpperCamelCase : Dict = old_model.weight
logger.info(F"{attribute} is initialized." )
__UpperCamelCase : Dict = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
__UpperCamelCase : Optional[Any] = old_model.bias
logger.info(F"{attribute} is initialized" )
__UpperCamelCase : int = True
break
elif attribute in special_keys and hasattr(snake_case__ , "in_proj_weight" ):
__UpperCamelCase : Dict = old_model.in_proj_weight.shape[0] // 3
__UpperCamelCase : List[str] = getattr(snake_case__ , snake_case__ )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
__UpperCamelCase : Optional[int] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
__UpperCamelCase : Dict = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
__UpperCamelCase : List[str] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
__UpperCamelCase : List[str] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
__UpperCamelCase : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
__UpperCamelCase : Any = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
__UpperCamelCase : Union[str, Any] = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
__UpperCamelCase : List[str] = nn.Parameter(old_model.embed_positions.weight[:512, :] )
__UpperCamelCase : Optional[int] = True
break
if attribute.isdigit():
__UpperCamelCase : Any = model[int(snake_case__ )]
__UpperCamelCase : Tuple = old_model[int(snake_case__ )]
else:
__UpperCamelCase : Optional[Any] = getattr(snake_case__ , snake_case__ )
if old_attribute == "":
__UpperCamelCase : List[Any] = old_model
else:
if not hasattr(snake_case__ , snake_case__ ):
raise ValueError(F"{old_model} does not have {old_attribute}" )
__UpperCamelCase : Dict = getattr(snake_case__ , snake_case__ )
if not is_key_init:
raise ValueError(F"{key} was not correctly initialized!" )
print(F"Saving model to {pytorch_dump_folder_path}" )
prophet.save_pretrained(snake_case__ )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path to the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowerCAmelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
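# Illustrative sketch (not part of the original script): the special_keys branch
# above splits a fused attention projection of shape (3 * embed_dim, embed_dim)
# into separate query/key/value weights by slicing the first dimension:
#
#   import torch
#   embed_dim = 4
#   in_proj_weight = torch.randn(3 * embed_dim, embed_dim)
#   q_w = in_proj_weight[:embed_dim, :]
#   k_w = in_proj_weight[embed_dim : 2 * embed_dim, :]
#   v_w = in_proj_weight[2 * embed_dim :, :]
#   assert q_w.shape == k_w.shape == v_w.shape == (embed_dim, embed_dim)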
| 298
|
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
_lowerCAmelCase = logging.getLogger()
def __lowerCAmelCase ( ):
__UpperCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument("-f" )
__UpperCamelCase : Any = parser.parse_args()
return args.f
def __lowerCAmelCase ( snake_case__ ):
__UpperCamelCase : Dict = {}
__UpperCamelCase : Dict = os.path.join(snake_case__ , "all_results.json" )
if os.path.exists(snake_case__ ):
with open(snake_case__ , "r" ) as f:
__UpperCamelCase : Any = json.load(snake_case__ )
else:
raise ValueError(F"can't find {path}" )
return results
def __lowerCAmelCase ( ):
__UpperCamelCase : Any = torch.cuda.is_available() and torch_device == "cuda"
return is_using_cuda and is_apex_available()
_lowerCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
@classmethod
def a_ (cls ) -> Union[str, Any]:
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
__UpperCamelCase : Optional[Any] = tempfile.mkdtemp()
__UpperCamelCase : List[str] = os.path.join(cls.tmpdir , "default_config.yml" )
write_basic_config(save_location=cls.configPath )
__UpperCamelCase : Optional[Any] = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def a_ (cls ) -> Union[str, Any]:
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Optional[int]:
__UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n ".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
__UpperCamelCase : Tuple = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "glue_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Dict:
__UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n ".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
__UpperCamelCase : int = get_results(_UpperCAmelCase )
self.assertLess(result["perplexity"] , 1_0_0 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "clm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Any:
__UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Optional[Any] = f"\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : Optional[Any] = get_results(_UpperCAmelCase )
self.assertLess(result["perplexity"] , 4_2 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "mlm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> int:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
__UpperCamelCase : int = 7 if get_gpu_count() > 1 else 2
__UpperCamelCase : int = self.get_auto_remove_tmp_dir()
__UpperCamelCase : str = f"\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : List[Any] = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertLess(result["train_loss"] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "ner_no_trainer" ) ) )
@unittest.skip(reason="Fix me @muellerzr" )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Any:
__UpperCamelCase : Tuple = self.get_auto_remove_tmp_dir()
__UpperCamelCase : str = f"\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : Optional[int] = get_results(_UpperCAmelCase )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["eval_f1"] , 2_8 )
self.assertGreaterEqual(result["eval_exact"] , 2_8 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "qa_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Dict:
__UpperCamelCase : Tuple = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[str] = f"\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : Tuple = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "swag_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Union[str, Any]:
__UpperCamelCase : str = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Dict = f"\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : Dict = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_rouge1"] , 1_0 )
self.assertGreaterEqual(result["eval_rouge2"] , 2 )
self.assertGreaterEqual(result["eval_rougeL"] , 7 )
self.assertGreaterEqual(result["eval_rougeLsum"] , 7 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "summarization_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Tuple:
__UpperCamelCase : Optional[int] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : List[Any] = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_bleu"] , 3_0 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "translation_no_trainer" ) ) )
@slow
def a_ (self ) -> List[Any]:
__UpperCamelCase : Tuple = logging.StreamHandler(sys.stdout )
logger.addHandler(_UpperCAmelCase )
__UpperCamelCase : Dict = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : Optional[int] = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10 )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Tuple:
__UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Optional[Any] = f"\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n ".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
__UpperCamelCase : str = get_results(_UpperCAmelCase )
        # The base model scores about 25%
self.assertGreaterEqual(result["eval_accuracy"] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "step_1" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "image_classification_no_trainer" ) ) )
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A = ""
A = "hf-legacy" # "hf://"" is reserved for hffs
def __init__(self , _UpperCAmelCase = None , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> Tuple:
super().__init__(self , **_UpperCAmelCase )
__UpperCamelCase : Any = repo_info
__UpperCamelCase : Optional[int] = token
__UpperCamelCase : List[str] = None
def a_ (self ) -> str:
if self.dir_cache is None:
__UpperCamelCase : int = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
__UpperCamelCase : List[Any] = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(_UpperCAmelCase ): {"name": str(_UpperCAmelCase ), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase = "rb" , **_UpperCAmelCase , ) -> Tuple:
if not isinstance(self.repo_info , _UpperCAmelCase ):
raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}" )
__UpperCamelCase : List[Any] = hf_hub_url(self.repo_info.id , _UpperCAmelCase , revision=self.repo_info.sha )
return fsspec.open(
_UpperCAmelCase , mode=_UpperCAmelCase , headers=get_authentication_headers_for_url(_UpperCAmelCase , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
def a_ (self , _UpperCAmelCase , **_UpperCAmelCase ) -> Dict:
self._get_dirs()
__UpperCamelCase : Any = self._strip_protocol(_UpperCAmelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(_UpperCAmelCase )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase=False , **_UpperCAmelCase ) -> Optional[Any]:
self._get_dirs()
__UpperCamelCase : List[Any] = PurePosixPath(path.strip("/" ) )
__UpperCamelCase : Dict = {}
for p, f in self.dir_cache.items():
__UpperCamelCase : Optional[int] = PurePosixPath(p.strip("/" ) )
__UpperCamelCase : int = p.parent
if root == path:
__UpperCamelCase : Any = f
__UpperCamelCase : List[Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f["name"] for f in out )
'''simple docstring'''
from maths.prime_check import is_prime
def twin_prime(number):
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
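# Usage sketch for twin_prime above; expected values worked out by hand:
#   twin_prime(5) -> 7   (5 and 7 are both prime)
#   twin_prime(4) -> -1  (4 is not prime)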
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
A = BioGptTokenizer
A = False
def a_ (self ) -> Optional[int]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__UpperCamelCase : Optional[Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
__UpperCamelCase : int = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
__UpperCamelCase : Optional[Any] = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
__UpperCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__UpperCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) )
with open(self.merges_file , "w" ) as fp:
fp.write("\n".join(_UpperCAmelCase ) )
def a_ (self , _UpperCAmelCase ) -> str:
__UpperCamelCase : int = "lower newer"
__UpperCamelCase : Optional[Any] = "lower newer"
return input_text, output_text
def a_ (self ) -> Union[str, Any]:
__UpperCamelCase : Optional[int] = BioGptTokenizer(self.vocab_file , self.merges_file )
__UpperCamelCase : List[Any] = "lower"
__UpperCamelCase : Union[str, Any] = ["low", "er</w>"]
__UpperCamelCase : Dict = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__UpperCamelCase : Dict = tokens + ["<unk>"]
__UpperCamelCase : Optional[Any] = [1_4, 1_5, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
@slow
def a_ (self ) -> int:
__UpperCamelCase : List[Any] = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
__UpperCamelCase : Dict = tokenizer.encode("sequence builders" , add_special_tokens=_UpperCAmelCase )
__UpperCamelCase : List[Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=_UpperCAmelCase )
__UpperCamelCase : List[str] = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
__UpperCamelCase : Any = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
'''simple docstring'''
def hubble_parameter(hubble_constant, radiation_density, matter_density, dark_energy, redshift):
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be non-negative")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
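# The expression above is the Friedmann equation written with density parameters:
#   H(z) = H0 * sqrt(Om_r*(1+z)**4 + Om_m*(1+z)**3 + Om_k*(1+z)**2 + Om_Lambda)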
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
_lowerCAmelCase = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
'''simple docstring'''
def fibonacci(n):
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]
def fibonacci_digits_index(n):
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index
def solution(n = 1_000):
    return fibonacci_digits_index(n)
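# For reference: with the default n = 1_000 this solves Project Euler #25
# (index of the first 1000-digit Fibonacci term); the expected answer is 4782.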
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
_lowerCAmelCase = '''src/transformers'''
_lowerCAmelCase = '''docs/source/en/tasks'''
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
with open(snake_case__ , "r" , encoding="utf-8" , newline="\n" ) as f:
__UpperCamelCase : str = f.readlines()
# Find the start prompt.
__UpperCamelCase : Dict = 0
while not lines[start_index].startswith(snake_case__ ):
start_index += 1
start_index += 1
__UpperCamelCase : Dict = start_index
while not lines[end_index].startswith(snake_case__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
_lowerCAmelCase = direct_transformers_import(TRANSFORMERS_PATH)
_lowerCAmelCase = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
_lowerCAmelCase = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def __lowerCAmelCase ( snake_case__ ):
__UpperCamelCase : Optional[Any] = TASK_GUIDE_TO_MODELS[task_guide]
__UpperCamelCase : str = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(snake_case__ , set() )
__UpperCamelCase : Union[str, Any] = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F"[{name}](../model_doc/{code})" for code, name in model_names.items()] ) + "\n"
def __lowerCAmelCase ( snake_case__ , snake_case__=False ):
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Union[str, Any] = _find_text_in_file(
filename=os.path.join(snake_case__ , snake_case__ ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , )
__UpperCamelCase : List[str] = get_model_list_for_task(snake_case__ )
if current_list != new_list:
if overwrite:
with open(os.path.join(snake_case__ , snake_case__ ) , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
" to fix this." )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
_lowerCAmelCase = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
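# As the header comment notes, this script is meant to be run from the repo root:
#   python utils/check_task_guides.py                      # check only
#   python utils/check_task_guides.py --fix_and_overwrite  # rewrite the guides in place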
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_lowerCAmelCase = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A = ["image_processor", "tokenizer"]
A = "OwlViTImageProcessor"
A = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__(self , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase ) -> str:
__UpperCamelCase : Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , _UpperCAmelCase , )
__UpperCamelCase : str = kwargs.pop("feature_extractor" )
__UpperCamelCase : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__(self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="max_length" , _UpperCAmelCase="np" , **_UpperCAmelCase ) -> str:
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) or (isinstance(_UpperCAmelCase , _UpperCAmelCase ) and not isinstance(text[0] , _UpperCAmelCase )):
__UpperCamelCase : Tuple = [self.tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )]
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(text[0] , _UpperCAmelCase ):
__UpperCamelCase : List[str] = []
# Maximum number of queries across batch
__UpperCamelCase : List[str] = max([len(_UpperCAmelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(_UpperCAmelCase ) != max_num_queries:
__UpperCamelCase : Any = t + [" "] * (max_num_queries - len(_UpperCAmelCase ))
__UpperCamelCase : int = self.tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
encodings.append(_UpperCAmelCase )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
__UpperCamelCase : List[str] = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
__UpperCamelCase : int = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__UpperCamelCase : Tuple = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
__UpperCamelCase : Optional[Any] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__UpperCamelCase : Any = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
__UpperCamelCase : List[Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__UpperCamelCase : Any = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
__UpperCamelCase : Optional[Any] = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
__UpperCamelCase : Optional[Any] = BatchEncoding()
__UpperCamelCase : Union[str, Any] = input_ids
__UpperCamelCase : List[str] = attention_mask
if query_images is not None:
__UpperCamelCase : str = BatchEncoding()
__UpperCamelCase : Any = self.image_processor(
_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ).pixel_values
__UpperCamelCase : List[Any] = query_pixel_values
if images is not None:
__UpperCamelCase : Dict = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if text is not None and images is not None:
__UpperCamelCase : Optional[Any] = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__UpperCamelCase : Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]:
return self.image_processor.post_process(*_UpperCAmelCase , **_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> List[str]:
return self.image_processor.post_process_object_detection(*_UpperCAmelCase , **_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]:
return self.image_processor.post_process_image_guided_detection(*_UpperCAmelCase , **_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> int:
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def a_ (self ) -> Tuple:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _UpperCAmelCase , )
return self.image_processor_class
@property
def a_ (self ) -> Union[str, Any]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _UpperCAmelCase , )
return self.image_processor
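# Minimal usage sketch for the processor above (the checkpoint name is only an
# illustration of a public OWL-ViT checkpoint, and `image` is a PIL image
# loaded elsewhere):
#
#   from transformers import OwlViTProcessor
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]],
#                      images=image, return_tensors="pt")
#   # `inputs` then carries input_ids, attention_mask and pixel_values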
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
_lowerCAmelCase = random.Random()
def __lowerCAmelCase ( snake_case__ , snake_case__=1.0 , snake_case__=None , snake_case__=None ):
if rng is None:
__UpperCamelCase : Optional[Any] = global_rng
__UpperCamelCase : Optional[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class A ( unittest.TestCase ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=4_0_0 , _UpperCAmelCase=2_0_0_0 , _UpperCAmelCase=2_0_4_8 , _UpperCAmelCase=1_2_8 , _UpperCAmelCase=1 , _UpperCAmelCase=5_1_2 , _UpperCAmelCase=3_0 , _UpperCAmelCase=4_4_1_0_0 , ) -> str:
__UpperCamelCase : List[Any] = parent
__UpperCamelCase : Dict = batch_size
__UpperCamelCase : str = min_seq_length
__UpperCamelCase : Tuple = max_seq_length
__UpperCamelCase : Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__UpperCamelCase : Tuple = spectrogram_length
__UpperCamelCase : List[Any] = feature_size
__UpperCamelCase : Tuple = num_audio_channels
__UpperCamelCase : Optional[int] = hop_length
__UpperCamelCase : List[Any] = chunk_length
__UpperCamelCase : Any = sampling_rate
def a_ (self ) -> int:
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def a_ (self , _UpperCAmelCase=False , _UpperCAmelCase=False ) -> Dict:
def _flatten(_UpperCAmelCase ):
return list(itertools.chain(*_UpperCAmelCase ) )
if equal_length:
__UpperCamelCase : Union[str, Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__UpperCamelCase : str = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__UpperCamelCase : int = [np.asarray(_UpperCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
A = TvltFeatureExtractor
def a_ (self ) -> Any:
__UpperCamelCase : List[str] = TvltFeatureExtractionTester(self )
def a_ (self ) -> int:
__UpperCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_UpperCAmelCase , "spectrogram_length" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "feature_size" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "num_audio_channels" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "hop_length" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "chunk_length" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "sampling_rate" ) )
def a_ (self ) -> Union[str, Any]:
__UpperCamelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase : Tuple = feat_extract_first.save_pretrained(_UpperCAmelCase )[0]
check_json_file_has_correct_format(_UpperCAmelCase )
__UpperCamelCase : List[Any] = self.feature_extraction_class.from_pretrained(_UpperCAmelCase )
__UpperCamelCase : int = feat_extract_first.to_dict()
__UpperCamelCase : str = feat_extract_second.to_dict()
__UpperCamelCase : List[str] = dict_first.pop("mel_filters" )
__UpperCamelCase : Union[str, Any] = dict_second.pop("mel_filters" )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def a_ (self ) -> List[str]:
__UpperCamelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase : int = os.path.join(_UpperCAmelCase , "feat_extract.json" )
feat_extract_first.to_json_file(_UpperCAmelCase )
__UpperCamelCase : Any = self.feature_extraction_class.from_json_file(_UpperCAmelCase )
__UpperCamelCase : Optional[Any] = feat_extract_first.to_dict()
__UpperCamelCase : Tuple = feat_extract_second.to_dict()
__UpperCamelCase : int = dict_first.pop("mel_filters" )
__UpperCamelCase : Dict = dict_second.pop("mel_filters" )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def a_ (self ) -> Tuple:
# Initialize feature_extractor
__UpperCamelCase : Any = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
__UpperCamelCase : Tuple = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
__UpperCamelCase : Union[str, Any] = [np.asarray(_UpperCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
__UpperCamelCase : Union[str, Any] = feature_extractor(np_speech_inputs[0] , return_tensors="np" , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
__UpperCamelCase : Optional[Any] = feature_extractor(_UpperCAmelCase , return_tensors="np" , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
__UpperCamelCase : Tuple = feature_extractor(
_UpperCAmelCase , return_tensors="np" , sampling_rate=4_4_1_0_0 , mask_audio=_UpperCAmelCase ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
__UpperCamelCase : List[str] = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
__UpperCamelCase : str = np.asarray(_UpperCAmelCase )
__UpperCamelCase : Dict = feature_extractor(_UpperCAmelCase , return_tensors="np" , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def a_ (self , _UpperCAmelCase ) -> List[Any]:
__UpperCamelCase : str = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
__UpperCamelCase : List[str] = ds.sort("id" ).select(range(_UpperCAmelCase ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def a_ (self ) -> Tuple:
__UpperCamelCase : Optional[int] = self._load_datasamples(1 )
__UpperCamelCase : Union[str, Any] = TvltFeatureExtractor()
__UpperCamelCase : Optional[int] = feature_extractor(_UpperCAmelCase , return_tensors="pt" ).audio_values
self.assertEquals(audio_values.shape , (1, 1, 1_9_2, 1_2_8) )
__UpperCamelCase : List[str] = torch.tensor([[-0.3_032, -0.2_708], [-0.4_434, -0.4_007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _UpperCAmelCase , atol=1E-4 ) )
'''simple docstring'''
def base16_encode(data):
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])
def base16_decode(data):
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
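# Round-trip sketch, with values computed from the definitions above:
#   base16_encode(b"Hello")     -> "48656C6C6F"
#   base16_decode("48656C6C6F") -> b"Hello"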
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
_lowerCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(
SCREAMING_SNAKE_CASE__ , r"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , )
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def a_ (self , _UpperCAmelCase ) -> np.ndarray:
if self.framework == "tf":
__UpperCamelCase : Optional[Any] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
__UpperCamelCase : Any = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_UpperCAmelCase )
else:
raise ValueError("Unsupported framework" )
return masked_index
def a_ (self , _UpperCAmelCase ) -> np.ndarray:
__UpperCamelCase : List[Any] = self.get_masked_index(_UpperCAmelCase )
__UpperCamelCase : Dict = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , f"No mask_token ({self.tokenizer.mask_token}) found on the input" , )
def a_ (self , _UpperCAmelCase ) -> Any:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["input_ids"][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_UpperCAmelCase )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase=None , **_UpperCAmelCase ) -> Dict[str, GenericTensor]:
if return_tensors is None:
__UpperCamelCase : Dict = self.framework
__UpperCamelCase : Tuple = self.tokenizer(_UpperCAmelCase , return_tensors=_UpperCAmelCase )
self.ensure_exactly_one_mask_token(_UpperCAmelCase )
return model_inputs
def a_ (self , _UpperCAmelCase ) -> Union[str, Any]:
__UpperCamelCase : int = self.model(**_UpperCAmelCase )
__UpperCamelCase : Dict = model_inputs["input_ids"]
return model_outputs
def a_ (self , _UpperCAmelCase , _UpperCAmelCase=5 , _UpperCAmelCase=None ) -> List[str]:
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
__UpperCamelCase : Tuple = target_ids.shape[0]
__UpperCamelCase : Union[str, Any] = model_outputs["input_ids"][0]
__UpperCamelCase : int = model_outputs["logits"]
if self.framework == "tf":
__UpperCamelCase : Optional[int] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
__UpperCamelCase : List[Any] = outputs.numpy()
__UpperCamelCase : Union[str, Any] = outputs[0, masked_index, :]
__UpperCamelCase : List[Any] = stable_softmax(_UpperCAmelCase , axis=-1 )
if target_ids is not None:
__UpperCamelCase : Optional[int] = tf.gather_nd(tf.squeeze(_UpperCAmelCase , 0 ) , target_ids.reshape(-1 , 1 ) )
__UpperCamelCase : List[Any] = tf.expand_dims(_UpperCAmelCase , 0 )
__UpperCamelCase : Union[str, Any] = tf.math.top_k(_UpperCAmelCase , k=_UpperCAmelCase )
__UpperCamelCase , __UpperCamelCase : Optional[int] = topk.values.numpy(), topk.indices.numpy()
else:
__UpperCamelCase : Union[str, Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_UpperCAmelCase ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
__UpperCamelCase : Union[str, Any] = outputs[0, masked_index, :]
__UpperCamelCase : str = logits.softmax(dim=-1 )
if target_ids is not None:
__UpperCamelCase : int = probs[..., target_ids]
__UpperCamelCase , __UpperCamelCase : List[Any] = probs.topk(_UpperCAmelCase )
__UpperCamelCase : List[Any] = []
__UpperCamelCase : Optional[int] = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
__UpperCamelCase : Any = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
__UpperCamelCase : str = input_ids.numpy().copy()
if target_ids is not None:
__UpperCamelCase : Optional[Any] = target_ids[p].tolist()
__UpperCamelCase : Any = p
# Filter padding out:
__UpperCamelCase : Optional[Any] = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
__UpperCamelCase : Optional[int] = self.tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
__UpperCamelCase : List[Any] = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence}
row.append(_UpperCAmelCase )
result.append(_UpperCAmelCase )
if single_mask:
return result[0]
return result
def a_ (self , _UpperCAmelCase , _UpperCAmelCase=None ) -> List[Any]:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__UpperCamelCase : str = [targets]
try:
__UpperCamelCase : Any = self.tokenizer.get_vocab()
except Exception:
__UpperCamelCase : Union[str, Any] = {}
__UpperCamelCase : Tuple = []
for target in targets:
__UpperCamelCase : str = vocab.get(_UpperCAmelCase , _UpperCAmelCase )
if id_ is None:
__UpperCamelCase : Optional[int] = self.tokenizer(
_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , max_length=1 , truncation=_UpperCAmelCase , )["input_ids"]
if len(_UpperCAmelCase ) == 0:
logger.warning(
f"The specified target token `{target}` does not exist in the model vocabulary. "
"We cannot replace it with anything meaningful, ignoring it" )
continue
__UpperCamelCase : Union[str, Any] = input_ids[0]
                # XXX: If users hit this code path, target lookup becomes
                # pretty slow, so the warning below nudges them to fix the
                # input and get faster performance.
logger.warning(
f"The specified target token `{target}` does not exist in the model vocabulary. "
f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`." )
target_ids.append(id_ )
__UpperCamelCase : Any = list(set(_UpperCAmelCase ) )
if len(_UpperCAmelCase ) == 0:
raise ValueError("At least one target must be provided when passed." )
__UpperCamelCase : Optional[int] = np.array(_UpperCAmelCase )
return target_ids
def a_ (self , _UpperCAmelCase=None , _UpperCAmelCase=None ) -> Optional[int]:
__UpperCamelCase : Union[str, Any] = {}
if targets is not None:
__UpperCamelCase : str = self.get_target_ids(_UpperCAmelCase , _UpperCAmelCase )
__UpperCamelCase : Tuple = target_ids
if top_k is not None:
__UpperCamelCase : Any = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , "The tokenizer does not define a `mask_token`." )
return {}, {}, postprocess_params
def __call__(self , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) -> Union[str, Any]:
__UpperCamelCase : int = super().__call__(_UpperCAmelCase , **_UpperCAmelCase )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) == 1:
return outputs[0]
return outputs
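# Minimal usage sketch for the fill-mask pipeline above (the model name is just
# an example of a masked-LM checkpoint):
#
#   from transformers import pipeline
#   unmasker = pipeline("fill-mask", model="distilroberta-base")
#   unmasker("Paris is the <mask> of France.", top_k=2)
#   # -> a list of dicts with "score", "token", "token_str" and "sequence" keys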
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
_lowerCAmelCase = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
_lowerCAmelCase = logging.getLogger()
def __lowerCAmelCase ( ):
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument("-f" )
__UpperCamelCase : Optional[Any] = parser.parse_args()
return args.f
def __lowerCAmelCase ( snake_case__ , snake_case__="eval" ):
__UpperCamelCase : List[str] = os.path.join(snake_case__ , F"{split}_results.json" )
if os.path.exists(snake_case__ ):
with open(snake_case__ , "r" ) as f:
return json.load(snake_case__ )
raise ValueError(F"can't find {path}" )
_lowerCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def a_ (self ) -> str:
__UpperCamelCase : Any = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[str] = f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_flax_glue.main()
__UpperCamelCase : int = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
@slow
def a_ (self ) -> Tuple:
__UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Any = f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_clm_flax.main()
__UpperCamelCase : Optional[int] = get_results(_UpperCAmelCase )
self.assertLess(result["eval_perplexity"] , 1_0_0 )
@slow
def a_ (self ) -> str:
__UpperCamelCase : Any = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Tuple = f"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_summarization_flax.main()
__UpperCamelCase : Tuple = get_results(_UpperCAmelCase , split="test" )
self.assertGreaterEqual(result["test_rouge1"] , 1_0 )
self.assertGreaterEqual(result["test_rouge2"] , 2 )
self.assertGreaterEqual(result["test_rougeL"] , 7 )
self.assertGreaterEqual(result["test_rougeLsum"] , 7 )
@slow
def a_ (self ) -> int:
__UpperCamelCase : int = self.get_auto_remove_tmp_dir()
__UpperCamelCase : str = f"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_mlm_flax.main()
__UpperCamelCase : Optional[Any] = get_results(_UpperCAmelCase )
self.assertLess(result["eval_perplexity"] , 4_2 )
@slow
def a_ (self ) -> Dict:
__UpperCamelCase : Dict = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Tuple = f"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_ta_mlm_flax.main()
__UpperCamelCase : Tuple = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.42 )
@slow
def a_ (self ) -> Union[str, Any]:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
__UpperCamelCase : Union[str, Any] = 7 if get_gpu_count() > 1 else 2
__UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Optional[Any] = f"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_flax_ner.main()
__UpperCamelCase : int = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertGreaterEqual(result["eval_f1"] , 0.3 )
@slow
def a_ (self ) -> List[Any]:
__UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Dict = f"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_qa.main()
__UpperCamelCase : List[Any] = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_f1"] , 3_0 )
self.assertGreaterEqual(result["eval_exact"] , 3_0 )
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(fnc, x_start, x_end, steps = 100, ):
    xa = x_start
    fxa = fnc(xa)
    area = 0.0
    for _ in range(steps):
        # Approximate each small segment of the curve as linear and solve
        # for the trapezoidal area
        xa_next = (x_end - x_start) / steps + xa
        fxa_next = fnc(xa_next)
        area += abs(fxa_next + fxa) * (xa_next - xa) / 2
        # Increment step
        xa = xa_next
        fxa = fxa_next
    return area
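# Each iteration above adds the area of one trapezoid, i.e. the composite rule
#   area ~= sum_i |f(x_i) + f(x_{i+1})| * (x_{i+1} - x_i) / 2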
if __name__ == "__main__":
    def f(x):
        return x**3 + x**2
print('''f(x) = x^3 + x^2''')
print('''The area between the curve, x = -5, x = 5 and the x axis is:''')
_lowerCAmelCase = 10
while i <= 100000:
print(f'with {i} steps: {trapezoidal_area(f, -5, 5, i)}')
i *= 10
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class A :
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase=9_9 , _UpperCAmelCase=1_3 , _UpperCAmelCase=1_6 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=2 , _UpperCAmelCase=3_2 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase=3_0 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=None , ) -> int:
__UpperCamelCase : List[str] = parent
__UpperCamelCase : str = batch_size
__UpperCamelCase : str = decoder_seq_length
# For common tests
__UpperCamelCase : Optional[int] = self.decoder_seq_length
__UpperCamelCase : Any = is_training
__UpperCamelCase : Tuple = use_attention_mask
__UpperCamelCase : Optional[int] = use_labels
__UpperCamelCase : Dict = vocab_size
__UpperCamelCase : Optional[int] = d_model
__UpperCamelCase : Union[str, Any] = d_model
__UpperCamelCase : int = decoder_layers
__UpperCamelCase : Dict = decoder_layers
__UpperCamelCase : str = decoder_ffn_dim
__UpperCamelCase : Optional[Any] = decoder_attention_heads
__UpperCamelCase : Optional[Any] = decoder_attention_heads
__UpperCamelCase : List[Any] = eos_token_id
__UpperCamelCase : int = bos_token_id
__UpperCamelCase : Tuple = pad_token_id
__UpperCamelCase : Tuple = decoder_start_token_id
__UpperCamelCase : Dict = use_cache
__UpperCamelCase : Optional[Any] = max_position_embeddings
__UpperCamelCase : int = None
__UpperCamelCase : Optional[int] = decoder_seq_length
__UpperCamelCase : Optional[int] = 2
__UpperCamelCase : Optional[int] = 1
def a_ (self ) -> List[Any]:
__UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
__UpperCamelCase : int = None
if self.use_attention_mask:
__UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
__UpperCamelCase : List[str] = None
if self.use_labels:
__UpperCamelCase : int = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
__UpperCamelCase : Optional[Any] = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]
        input_ids[input_ids == 0] += 1
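        # bump zero ids up by one so no pad tokens appear; cached and uncached
        # forward passes must then see identical, fully-attended inputs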
# first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        past_key_values = outputs["past_key_values"]
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class A ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False
    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)
    def test_inputs_embeds(self):
        pass

    def test_save_load_fast_init_from_base(self):
        pass

    def test_save_load_fast_init_to_base(self):
        pass
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)
    def test_retain_grad_hidden_states_attentions(self):
        return
@unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
| 298
| 1
|
'''simple docstring'''
from __future__ import annotations
class IIRFilter:
'''simple docstring'''
    def __init__(self, order) -> None:
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order
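    # The filter realises the direct-form difference equation
    #   a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[k]*x[n-k]
    #                         - a[1]*y[n-1] - ... - a[k]*y[n-k]
    # with the histories above holding the k most recent inputs and outputs.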
    def set_coefficients(self, a_coeffs, b_coeffs) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            error_msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(error_msg)
        if len(b_coeffs) != self.order + 1:
            error_msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(error_msg)
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs
    def process(self, sample) -> float:
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
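# Usage sketch (the coefficients here are made up, not a designed filter):
#   filt = IIRFilter(2)
#   filt.set_coefficients([1.0, 0.1, 0.2], [0.5, 0.3, 0.2])
#   out = [filt.process(s) for s in (0.0, 1.0, 0.5)]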
| 298
|
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = '''Hello, World!'''
SAMPLE_LANGUAGE = '''en_XX'''
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path, pytorch_dump_folder_path, classification_head):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent), checkpoint_file=Path(xmod_checkpoint_path).name, _name="xmod_base", arch="xmod_base", task="multilingual_masked_lm", data_name_or_path=str(data_dir), bpe="sentencepiece", sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"), src_dict=str(data_dir / "dict.txt"), )
    xmod.eval()  # disable dropout
    print(xmod)
    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings, hidden_size=xmod.cfg.model.encoder_embed_dim, num_hidden_layers=xmod.cfg.model.encoder_layers, num_attention_heads=xmod.cfg.model.encoder_attention_heads, intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1E-5, pre_norm=xmod.cfg.model.encoder_normalize_before, adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2), adapter_layer_norm=xmod.cfg.model.adapter_layer_norm, adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm, ln_before_adapter=xmod.cfg.model.ln_before_adapter, languages=xmod.cfg.model.languages, )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)
    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()
# Now let's copy all the weights.
# Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight )  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]
        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        ):
            raise AssertionError("Dimensions of self-attention weights do not match." )
        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias
        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match." )
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias
        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match." )
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias
        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match." )
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias
        if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
            raise AssertionError("Lists of language adapters do not match." )
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias
    # end of layer
    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias
    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)
    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
print(our_output.shape , their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
print(F"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1E-3)
print("Do both models output the same tensors?" , "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
print(F"Saving model to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 298
| 1
|
'''simple docstring'''
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset):
'''simple docstring'''
def __init__(self , _UpperCAmelCase="" , _UpperCAmelCase="train" ) -> Any:
assert os.path.isdir(_UpperCAmelCase )
__UpperCamelCase : str = []
__UpperCamelCase : Any = os.listdir(_UpperCAmelCase )
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
__UpperCamelCase : Optional[int] = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
if not os.path.isfile(_UpperCAmelCase ):
continue
self.documents.append(_UpperCAmelCase )
    def __len__(self) -> int:
        return len(self.documents)
    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines
def process_story(raw_story):
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))
    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]
    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []
    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))
    return story_lines, summary_lines
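# A CNN/DailyMail story file is plain text: article sentences first, then one or more
# "@highlight" markers, each followed by a single summary sentence.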
def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", "\"", "\u2019", "\u201d", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."
def truncate_or_pad(sequence, block_size, pad_token_id):
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence
def build_mask(sequence, pad_token_id):
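    # Attention-style mask: 1 for real tokens, 0 wherever the id equals pad_token_id.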
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask
def encode_for_summarization(story_lines, summary_lines, tokenizer):
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids
def compute_token_type_ids(batch, separator_token_id):
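    # BERT-style alternating segment ids: the id flips at every separator token.
    # Note that tokens before the first separator get -1 % 2 == 1, so with
    # separator id 102, [[5, 102, 7, 102, 9]] maps to [[1, 0, 0, 1, 1]].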
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
| 298
|
'''simple docstring'''
def __lowerCAmelCase ( txt ):
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt) )
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 298
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
_lowerCAmelCase = logging.get_logger(__name__)
class A ( ChineseCLIPImageProcessor ):
'''simple docstring'''
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 298
|
'''simple docstring'''
def combination_sum_iv(n, array, target):
    def count_of_possible_combinations(target) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)
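# Without memoization the recursion above revisits the same sub-targets over and over
# (exponential time); the two variants below trade that for O(len(array) * target) work.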
def combination_sum_iv_dp_array(n, array, target):
    def count_of_possible_combinations_with_dp_array(target, dp_array) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n, array, target):
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
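# Sanity check: for array=[1, 2, 5] and target=5 the ordered sequences are
# (1,1,1,1,1), (1,1,1,2) in 4 arrangements, (1,2,2) in 3 arrangements, and (5),
# so every variant returns 9.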
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 298
| 1
|
'''simple docstring'''
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=30522, type=int)
    args = parser.parse_args()
logger.info(f'Loading data from {args.data_file}')
with open(args.data_file, '''rb''') as fp:
        data = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
    counter = Counter()
for tk_ids in data:
counter.update(tk_ids)
    counts = [0] * args.vocab_size
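    # counts[token_id] holds the corpus frequency of that id; ids never seen keep 0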
for k, v in counter.items():
        counts[k] = v
logger.info(f'Dump to {args.token_counts_dump}')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 298
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 298
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class A ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
    def test_inputs_embeds(self):
        pass
@unittest.skip(reason="RegNet does not support input and output embeddings" )
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1), msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
                    self.assertTrue(
                        torch.all(module.bias == 0), msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
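            # the "+ 1" accounts for the stem's output, which is returned ahead of the
            # per-stage feature maps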
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 2, self.model_tester.image_size // 2], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor(self):
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
| 298
|
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
'''simple docstring'''
    def __init__(
        self,
        parent,
        do_resize = True,
        size = None,
        size_divisor = 32,
        do_rescale = True,
        rescale_factor = 1 / 255,
        do_normalize = True,
        do_center_crop = True,
        image_mean = [0.48145466, 0.4578275, 0.40821073],
        image_std = [0.26862954, 0.26130258, 0.27577711],
        do_pad = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            max_size = int((1333 / 800) * size)
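            # the longest side is capped at size * 1333 / 800, mirroring the
            # shortest-edge / longest-edge resizing rule the processor applies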
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class A ( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)
@property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
def a_ (self ) -> List[str]:
pass
    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
| 298
| 1
|
'''simple docstring'''
from manim import *
class A ( Scene ):
'''simple docstring'''
    def construct(self):
        mem = Rectangle(height=0.5 , width=0.5 )
__UpperCamelCase : Optional[int] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__UpperCamelCase : int = [mem.copy() for i in range(6 )]
__UpperCamelCase : Any = [mem.copy() for i in range(6 )]
__UpperCamelCase : Union[str, Any] = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
__UpperCamelCase : Any = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
__UpperCamelCase : Union[str, Any] = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
__UpperCamelCase : Any = Text("CPU" , font_size=2_4 )
        cpu = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_UpperCAmelCase )
__UpperCamelCase : Optional[Any] = [mem.copy() for i in range(1 )]
__UpperCamelCase : List[str] = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
__UpperCamelCase : Dict = Text("GPU" , font_size=2_4 )
        gpu = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
gpu.align_to(_UpperCAmelCase , _UpperCAmelCase )
gpu.set_x(gpu.get_x() - 1 )
self.add(_UpperCAmelCase )
__UpperCamelCase : Optional[Any] = [mem.copy() for i in range(6 )]
__UpperCamelCase : str = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
__UpperCamelCase : Dict = Text("Model" , font_size=2_4 )
        model = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.play(
Create(_UpperCAmelCase , run_time=1 ) , Create(_UpperCAmelCase , run_time=1 ) , Create(_UpperCAmelCase , run_time=1 ) , )
        step_a = MarkupText(
f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM." , font_size=2_4 , )
        key = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
        key_text = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase , run_time=2.5 ) , Write(_UpperCAmelCase ) , Write(_UpperCAmelCase ) )
self.add(_UpperCAmelCase )
        first_animations = []
        second_animations = []
        cpu_targs = []
for i, rect in enumerate(_UpperCAmelCase ):
            cpu_target = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(YELLOW , opacity=0.7 )
cpu_target.move_to(_UpperCAmelCase )
cpu_target.generate_target()
__UpperCamelCase : List[Any] = 0.46 / 4
__UpperCamelCase : Any = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_UpperCAmelCase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=_UpperCAmelCase , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=_UpperCAmelCase , buff=0.0 )
cpu_targs.append(_UpperCAmelCase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_UpperCAmelCase ) )
second_animations.append(MoveToTarget(_UpperCAmelCase , run_time=1.5 ) )
self.play(*_UpperCAmelCase )
self.play(*_UpperCAmelCase )
self.wait()
| 298
|
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path, pytorch_dump_folder_path):
    bort_4_8_768_1024_hparams = {
"attention_cell": "multi_head",
"num_layers": 4,
"units": 1_024,
"hidden_size": 768,
"max_length": 512,
"num_heads": 8,
"scaled": True,
"dropout": 0.1,
"use_residual": True,
"embed_size": 1_024,
"embed_dropout": 0.1,
"word_embed": None,
"layer_norm_eps": 1E-5,
"token_type_vocab_size": 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"], num_layers=predefined_args["num_layers"], units=predefined_args["units"], hidden_size=predefined_args["hidden_size"], max_length=predefined_args["max_length"], num_heads=predefined_args["num_heads"], scaled=predefined_args["scaled"], dropout=predefined_args["dropout"], output_attention=False, output_all_encodings=False, use_residual=predefined_args["use_residual"], activation=predefined_args.get("activation", "gelu"), layer_norm_eps=predefined_args.get("layer_norm_eps", None), )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
__UpperCamelCase : str = "openwebtext_ccnews_stories_books_cased"
# Specify download folder to Gluonnlp's vocab
__UpperCamelCase : Tuple = os.path.join(get_home_dir() , "models" )
__UpperCamelCase : Union[str, Any] = _load_vocab(snake_case__ , snake_case__ , snake_case__ , cls=snake_case__ )
__UpperCamelCase : Union[str, Any] = nlp.model.BERTModel(
snake_case__ , len(snake_case__ ) , units=predefined_args["units"] , embed_size=predefined_args["embed_size"] , embed_dropout=predefined_args["embed_dropout"] , word_embed=predefined_args["word_embed"] , use_pooler=snake_case__ , use_token_type_embed=snake_case__ , token_type_vocab_size=predefined_args["token_type_vocab_size"] , use_classifier=snake_case__ , use_decoder=snake_case__ , )
    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
"architectures": ["BertForMaskedLM"],
"attention_probs_dropout_prob": predefined_args["dropout"],
"hidden_act": "gelu",
"hidden_dropout_prob": predefined_args["dropout"],
"hidden_size": predefined_args["embed_size"],
"initializer_range": 0.02,
"intermediate_size": predefined_args["hidden_size"],
"layer_norm_eps": predefined_args["layer_norm_eps"],
"max_position_embeddings": predefined_args["max_length"],
"model_type": "bort",
"num_attention_heads": predefined_args["num_heads"],
"num_hidden_layers": predefined_args["num_layers"],
"pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
"type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
"vocab_size": len(snake_case__ ),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"
        return gluon_param
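    # Every weight copy below goes through check_and_map_params, so a shape mismatch
    # fails fast instead of silently broadcasting.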
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight" )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight" )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta" )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma" )
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
    for i in range(hf_bort_config.num_hidden_layers ):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias" )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight" )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias" )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight" )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias" )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight" )
        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias" )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight" )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta" )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma" )
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias" )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight" )
        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias" )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight" )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta" )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
__UpperCamelCase : Any = RobertaTokenizer.from_pretrained("roberta-base" )
__UpperCamelCase : int = tokenizer.encode_plus(snake_case__ )["input_ids"]
# Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])
# Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
hf_bort_model.eval()
__UpperCamelCase : str = tokenizer.encode_plus(snake_case__ , return_tensors="pt" )
__UpperCamelCase : Dict = hf_bort_model(**snake_case__ )[0]
__UpperCamelCase : List[Any] = output_gluon[0].asnumpy()
__UpperCamelCase : Optional[int] = output_hf[0].detach().numpy()
__UpperCamelCase : Dict = np.max(np.abs(hf_layer - gluon_layer ) ).item()
__UpperCamelCase : List[Any] = np.allclose(snake_case__ , snake_case__ , atol=1E-3 )
if success:
print("✔️ Both model do output the same tensors" )
else:
print("❌ Both model do **NOT** output the same tensors" )
print("Absolute difference is:" , snake_case__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 298
| 1
|
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left, right, array, target):
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array, target):
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left, right, array, target):
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
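# Both searches cut the candidate range to roughly a third per step (O(log3 n)
# comparisons) and fall back to the linear scan once fewer than `precision`
# candidates remain.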
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by comma:\n''').strip()
    collection = [int(item.strip()) for item in user_input.split(''',''')]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input('''Enter the number to be found in the list:\n''').strip())
    resulta = ite_ternary_search(collection, target)
    resultb = rec_ternary_search(0, len(collection) - 1, collection, target)
    if resulta != -1:
        print(f'Iterative search: {target} found at positions: {resulta}')
        print(f'Recursive search: {target} found at positions: {resultb}')
else:
print('''Not found''')
| 298
|
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
'''simple docstring'''
    def _info(self):
return datasets.DatasetInfo(
features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=_UpperCAmelCase , )
    def _split_generators(self, dl_manager, pipeline):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]
    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
'''simple docstring'''
    def _info(self):
return datasets.DatasetInfo(
features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=_UpperCAmelCase , )
    def _split_generators(self, dl_manager, pipeline):
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
]
def a_ (self , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(_UpperCAmelCase )
def __lowerCAmelCase ( ):
return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )]
def __lowerCAmelCase ( ):
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )]
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
@require_beam
def a_ (self ) -> Union[str, Any]:
__UpperCamelCase : Union[str, Any] = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__UpperCamelCase : str = DummyBeamDataset(cache_dir=_UpperCAmelCase , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(_UpperCAmelCase , builder.name , "default" , "0.0.0" , f"{builder.name}-train.arrow" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
__UpperCamelCase : Optional[int] = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , _UpperCAmelCase )
self.assertEqual(dset["train"].info.splits["train"].num_examples , _UpperCAmelCase )
self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(_UpperCAmelCase , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def a_ (self ) -> Optional[Any]:
import apache_beam as beam
__UpperCamelCase : Optional[int] = beam.io.parquetio.WriteToParquet
__UpperCamelCase : List[str] = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__UpperCamelCase : Optional[int] = DummyBeamDataset(cache_dir=_UpperCAmelCase , beam_runner="DirectRunner" )
with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
__UpperCamelCase : List[str] = partial(_UpperCAmelCase , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
_UpperCAmelCase , builder.name , "default" , "0.0.0" , f"{builder.name}-train-00000-of-00002.arrow" ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
_UpperCAmelCase , builder.name , "default" , "0.0.0" , f"{builder.name}-train-00000-of-00002.arrow" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
__UpperCamelCase : List[str] = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , _UpperCAmelCase )
self.assertEqual(dset["train"].info.splits["train"].num_examples , _UpperCAmelCase )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
self.assertTrue(
os.path.exists(os.path.join(_UpperCAmelCase , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def a_ (self ) -> str:
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__UpperCamelCase : Optional[Any] = DummyBeamDataset(cache_dir=_UpperCAmelCase )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def a_ (self ) -> List[str]:
__UpperCamelCase : Tuple = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__UpperCamelCase : str = NestedBeamDataset(cache_dir=_UpperCAmelCase , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(_UpperCAmelCase , builder.name , "default" , "0.0.0" , f"{builder.name}-train.arrow" ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
__UpperCamelCase : Union[str, Any] = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , _UpperCAmelCase )
self.assertEqual(dset["train"].info.splits["train"].num_examples , _UpperCAmelCase )
self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(_UpperCAmelCase , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
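
# Minimal standalone sketch of how these builders are exercised (illustrative;
# the cache directory is an assumed path, and `apache-beam` must be installed
# or `require_beam` will skip the tests):
#
#     builder = DummyBeamDataset(cache_dir="/tmp/beam_cache", beam_runner="DirectRunner")
#     builder.download_and_prepare()
#     train_split = builder.as_dataset()["train"]   # 3 rows: foo, bar, foobar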
import os
from typing import BinaryIO, Optional, Union

import numpy as np
import pyarrow.parquet as pq

from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


def get_writer_batch_size(features: Features) -> Optional[int]:
    """Pick the maximum Parquet row group size for the given features.

    Media features get smaller row groups so that random access to a single
    row does not require reading a huge row group.
    """
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size


class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            hash=hash,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class ParquetDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Write the underlying Arrow table as Parquet to a binary file handle.

        The caller is responsible for opening and closing the handle.
        """
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
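
# Minimal usage sketch for the reader/writer above (illustrative assumptions:
# the file name "data.parquet" and building the dataset via `Dataset.from_dict`):
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"text": ["foo", "bar"]})
#     ParquetDatasetWriter(ds, "data.parquet").write()   # returns bytes written
#     reloaded = ParquetDatasetReader("data.parquet", split="train").read()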
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
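
# Typical command-line usage once `accelerate` is installed (the config path
# below is an illustrative assumption):
#
#     accelerate test --config_file /path/to/default_config.yaml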
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}


class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
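
# Illustrative sketch (added example): configuring a model for x4 upscaling.
# The overridden values are assumptions for demonstration only.
#
#     config = Swin2SRConfig(upscale=4, window_size=8)
#     config.num_layers  # -> 6, derived from len(depths)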
import json
import os
import unittest

from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
    VOCAB_FILES_NAMES,
    BlenderbotSmallTokenizer,
)

from ...test_tokenization_common import TokenizerTesterMixin


class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]

        assert encoded[-1] == encoded_dot[0]
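
# Note on the toy vocabulary above (added explanation): "apte" tokenizes to
# ["ap@@", "te"] because BPE first applies the merges "a p" and "t e</w>", and
# no merge joins "ap" with "te</w>"; the non-final piece is then marked with
# the "@@" continuation suffix.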
import json
import os
from datetime import date
from pathlib import Path

from tabulate import DataRow, TableFormat, tabulate


hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)


failed = []
group_info = []

no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}

payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]

# Collect failed tests from every pytest json log in the working directory
total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f"{line['duration']:.4f}"
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ""
all_filesafailed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)

            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])

            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    if len(message) > 3_000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3_000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)

if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
        date_report = {
            "type": "context",
            "elements": [
                {
                    "type": "plain_text",
                    "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
                }
            ],
        }
        payload.append(date_report)
        response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
        ts = response.data["ts"]
        for failed_file in all_filesafailed:
            for test_location, test_failures in failed_file.items():
                # Keep only the first instance of the test name
                test_class = ""
                for i, row in enumerate(test_failures):
                    if row[0] != test_class:
                        test_class = row[0]
                    else:
                        test_failures[i][0] = ""

                payload = {
                    "type": "section",
                    "text": {
                        "type": "mrkdwn",
                        "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                    },
                }
                client.chat_postMessage(
                    channel="#accelerate-ci-daily",
                    thread_ts=ts,
                    blocks=[payload],
                )
from typing import Optional, Tuple, Union

import tensorflow as tf

from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
    TFBaseModelOutputWithNoAttention,
    TFBaseModelOutputWithPoolingAndNoAttention,
    TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class TFRegNetShortCut(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)


class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state


class TFRegNetXLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetYLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetStage(tf.keras.layers.Layer):
    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state


class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(
        self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format to have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )


class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}


REGNET_START_DOCSTRING = r"""
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: Optional[tf.Tensor] = None,
        labels: Optional[tf.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
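
# Illustrative usage sketch (added example following the standard `transformers`
# pattern; the local image path is an assumption):
#
#     from transformers import AutoImageProcessor, TFRegNetForImageClassification
#     from PIL import Image
#
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     inputs = processor(Image.open("cat.jpg"), return_tensors="tf")
#     predicted = int(tf.math.argmax(model(**inputs).logits, axis=-1))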
from math import factorial


def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of `successes` successes out of `trials` tries,
    with probability `prob` of a single success."""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 0 - 1")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
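
# Quick hand check (added example): P(X = 2) for n = 4, p = 0.75 is
# C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375.
if __name__ == "__main__":
    assert abs(binomial_distribution(2, 4, 0.75) - 0.2109375) < 1e-12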
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
    BERT_INPUTS_DOCSTRING,
    BERT_START_DOCSTRING,
    BertEmbeddings,
    BertLayer,
    BertPooler,
    BertPreTrainedModel,
)


def entropy(x):
    """Calculate the entropy of a pre-softmax logit tensor."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A


class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)

        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits


@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits


class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!


class BertHighway(nn.Module):
    """A highway (early-exit) head: pooler + dropout + classifier on an intermediate layer."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output


@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
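
# Quick illustrative check of `entropy` (added example): for uniform logits over
# n classes the entropy equals log(n).
#
#     import math, torch
#     x = torch.zeros(1, 4)  # uniform distribution over 4 classes
#     entropy(x)             # -> tensor([1.3863]), i.e. math.log(4)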
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm

from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
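
# Typical invocation through python-fire (the model name and data directory
# below are illustrative assumptions):
#
#     python save_len_file.py --tokenizer_name facebook/bart-large --data_dir ./cnn_dm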
import os

from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home


default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
def actual_power(a: int, b: int):
    """Divide-and-conquer exponentiation for integer a and b."""
    if b == 0:
        return 1
    # Compute the half power once; the original recursed twice, which made the
    # number of multiplications linear in b instead of logarithmic.
    # int() truncates toward zero, so this also behaves for negative b.
    half = actual_power(a, int(b / 2))
    if (b % 2) == 0:
        return half * half
    else:
        return a * half * half


def power(a: int, b: int) -> float:
    """Return a**b, handling negative exponents via 1 / a**|b|."""
    if b < 0:
        return 1 / actual_power(a, -b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
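
# Quick illustrative checks (added examples): exponentiation by squaring halves
# the exponent on every call, so recursion depth is O(log |b|).
if __name__ == "__main__":
    assert power(2, 10) == 1024
    assert power(5, 0) == 1
    assert power(2, -2) == 0.25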
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class A :
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase=1_3 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=9_9 , _UpperCAmelCase=3_2 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=3_7 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=5_1_2 , _UpperCAmelCase=1_6 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ) -> Dict:
__UpperCamelCase : Optional[Any] = parent
__UpperCamelCase : List[str] = 1_3
__UpperCamelCase : List[Any] = 7
__UpperCamelCase : List[str] = True
__UpperCamelCase : Optional[Any] = True
__UpperCamelCase : Tuple = True
__UpperCamelCase : str = True
__UpperCamelCase : List[Any] = 9_9
__UpperCamelCase : Union[str, Any] = 3_8_4
__UpperCamelCase : str = 2
__UpperCamelCase : Optional[Any] = 4
__UpperCamelCase : Any = 3_7
__UpperCamelCase : str = "gelu"
__UpperCamelCase : Optional[Any] = 0.1
__UpperCamelCase : str = 0.1
__UpperCamelCase : str = 5_1_2
__UpperCamelCase : Optional[Any] = 1_6
__UpperCamelCase : Dict = 2
__UpperCamelCase : Optional[int] = 0.02
__UpperCamelCase : List[Any] = 3
__UpperCamelCase : Optional[Any] = 4
__UpperCamelCase : int = 1_2_8
__UpperCamelCase : Tuple = 2
__UpperCamelCase : str = 9
__UpperCamelCase : List[Any] = 1
__UpperCamelCase : Any = None
def a_ (self ) -> int:
__UpperCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase : str = None
if self.use_input_mask:
__UpperCamelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase : int = None
if self.use_token_type_ids:
__UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase : List[Any] = None
__UpperCamelCase : Union[str, Any] = None
__UpperCamelCase : Optional[Any] = None
if self.use_labels:
__UpperCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase : Tuple = ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase : str = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all digits before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)

    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
            ''' currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self, task_performed: list, total: int):
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask: int, task_no: int):
        # if all persons have been given a task, this arrangement counts as one way
        if mask == self.final_mask:
            return 1

        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0

        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we don't use this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue

                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util

        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed: list):
        # store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
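
# Hedged cross-check (not part of the original): the same count can be obtained
# by brute force, trying every way to give each person one task from their list
# and keeping only the assignments where all chosen tasks are distinct.
def brute_force_count(task_performed: list) -> int:
    from itertools import product

    count = 0
    for choice in product(*task_performed):  # one task per person
        if len(set(choice)) == len(choice):  # all chosen tasks distinct
            count += 1
    return count

# brute_force_count([[1, 3, 4], [1, 2, 5], [3, 4]]) == 10, matching the DP above.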
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
_lowerCAmelCase = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
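
# Usage sketch (illustrative, not in the original file): every `run_*_no_trainer`
# script writes its final metrics to `<output_dir>/all_results.json`, which
# get_results simply loads back.
def _demo_get_results():
    tmp = tempfile.mkdtemp()
    with open(os.path.join(tmp, "all_results.json"), "w") as f:
        json.dump({"eval_accuracy": 0.8}, f)
    return get_results(tmp)  # -> {"eval_accuracy": 0.8}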
def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --seed=42
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --block_size 128
            --per_device_train_batch_size 5
            --per_device_eval_batch_size 5
            --num_train_epochs 2
            --output_dir {tmp_dir}
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --num_train_epochs=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_ner_no_trainer(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))
    @unittest.skip(reason="Fix me @muellerzr")
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --seed=42
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/swag/sample.json
            --validation_file tests/fixtures/tests_samples/swag/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=20
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))
    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
            --model_name_or_path sshleifer/student_marian_en_ro_6_1
            --source_lang en
            --target_lang ro
            --train_file tests/fixtures/tests_samples/wmt16/sample.json
            --validation_file tests/fixtures/tests_samples/wmt16/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --num_beams=6
            --learning_rate=3e-3
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --source_lang en_XX
            --target_lang ro_RO
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))
    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
            --dataset_name huggingface/semantic-segmentation-test-sample
            --output_dir {tmp_dir}
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
            --model_name_or_path google/vit-base-patch16-224-in21k
            --dataset_name hf-internal-testing/cats_vs_dogs_sample
            --learning_rate 1e-4
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 1
            --max_train_steps 2
            --train_val_split 0.1
            --seed 42
            --output_dir {tmp_dir}
            --with_tracking
            --checkpointing_steps 1
        """.split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)

        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
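
# Hedged example (not in this file): in the diffusers test suite this mixin is
# typically wired to a concrete block by a small subclass that sets
# `block_class` and `block_type`. The import path below is an assumption about
# the diffusers layout of this era, so the example is left commented out.
#
# from diffusers.models.unet_2d_blocks import DownBlock2D
#
# class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
#     block_class = DownBlock2D
#     block_type = "down"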
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    if not isinstance(number, int):
        error_msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(error_msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
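
# Self-contained sketch (illustrative, not from the original module): the same
# twin-prime check with a local trial-division primality test, so it runs
# without the maths package.
def _is_prime(n: int) -> bool:
    if n < 2:
        return False
    return all(n % d for d in range(2, int(n**0.5) + 1))


def _twin_prime(number: int) -> int:
    return number + 2 if _is_prime(number) and _is_prime(number + 2) else -1


assert _twin_prime(3) == 5  # 3 and 5 are twin primes
assert _twin_prime(4) == -1  # 4 is not prime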
import argparse
import datetime
def zeller(date_input: str) -> str:
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }

    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m: int = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1: str = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d: int = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2: str = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y: int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?"
        )

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c: int = int(str(y)[:2])
    k: int = int(str(y)[2:])
    t: int = int(2.6 * m - 5.39)
    u: int = int(c / 4)
    v: int = int(k / 4)
    x: int = int(d + k)
    z: int = int(t + u + v + x)
    w: int = int(z - (2 * c))
    f: int = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response: str = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
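
# Hedged cross-check (not part of the original script): for a valid input the
# weekday named by zeller() should agree with Python's datetime module directly.
def _check_zeller(date_input: str) -> None:
    m, d, y = int(date_input[:2]), int(date_input[3:5]), int(date_input[6:])
    expected = datetime.date(y, m, d).strftime("%A")
    assert expected in zeller(date_input)

# _check_zeller("01-31-2010")  # zeller returns "... is a Sunday!"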
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : int =argparse.ArgumentParser(
description=(
'Find out what day of the week nearly any date is or was. Enter '
'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
)
)
parser.add_argument(
'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
)
__lowerCAmelCase : int =parser.parse_args()
zeller(args.date_input)
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
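
# Sanity-check sketch (not in the original): at redshift 0 the bracket
# radiation + matter + curvature + dark_energy sums to exactly 1 by
# construction, so hubble_parameter returns the input Hubble constant.
def _check_hubble_at_z0() -> None:
    h0 = hubble_parameter(
        hubble_constant=68.3,
        radiation_density=1e-4,
        matter_density=0.3,
        dark_energy=0.7,
        redshift=0,
    )
    assert abs(h0 - 68.3) < 1e-9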
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
__A = "hf-internal-testing/tiny-random-bert"
__A = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
__A = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)

        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")

        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))

            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def get_model_list_for_task(task_guide):
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )

    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
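
# Usage sketch (illustrative, not in the original script): _find_text_in_file
# returns the block between the two prompts plus the indices needed to splice a
# replacement back into the file.
def _demo_find_text_in_file():
    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".md", delete=False) as f:
        f.write("intro\n<!--start-->\nold list\n<!--end-->\noutro\n")
    block, start_index, end_index, lines = _find_text_in_file(f.name, "<!--start-->", "<!--end-->")
    assert block == "old list\n"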
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'tokenizer_config_file': 'tokenizer_config.json',
'merges_file': 'merges.txt',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
),
},
'tokenizer_config_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
),
},
'merges_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
),
},
}
BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, given as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1024}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]

            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # merge the pair with the lowest merge rank first
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word
def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[Any]:
if self.bpe_ranks is None:
raise ValueError(
"This tokenizer was instantiated without a `merges.txt` file, so"
" that it can only be used for decoding, not for encoding."
"Make sure to provide `merges.txt` file at instantiation to enable "
"encoding.")
if self.do_lower_case:
_A : List[Any] = text.lower()
_A : Optional[int] = text.split()
_A : List[str] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(__lowerCamelCase).split(" ")))
return split_tokens
def _lowerCamelCase ( self , __lowerCamelCase) -> int:
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token))
def _lowerCamelCase ( self , __lowerCamelCase) -> str:
_A : List[str] = self.decoder.get(__lowerCamelCase , self.unk_token)
return result
def _lowerCamelCase ( self , __lowerCamelCase) -> str:
_A : str = " ".join(__lowerCamelCase)
# make sure @@ tokens are concatenated
_A : int = "".join(string.split(__lowerCamelCase))
return string
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
_A : Any = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
_A : Any = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
with open(__lowerCamelCase , "w" , encoding="utf-8") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase) + "\n")
_A : Union[str, Any] = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(__lowerCamelCase , "w" , encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!")
_A : Optional[int] = token_index
writer.write(" ".join(__lowerCamelCase) + "\n")
index += 1
return (vocab_file, merges_file)
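# Standalone illustration of the BPE loop implemented above, with readable
# names and a toy merge table; the ranks below are made up for the demo, not
# taken from any real merges.txt.
def toy_bpe(word: str, bpe_ranks: dict) -> str:
    symbols = tuple(word)
    while len(symbols) > 1:
        pairs = {(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)}
        bigram = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
        if bigram not in bpe_ranks:
            break  # no known merge left to apply
        first, second = bigram
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == bigram:
                merged.append(first + second)
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = tuple(merged)
    return " ".join(symbols)

assert toy_bpe("lower", {("l", "o"): 0, ("lo", "w"): 1}) == "low e r"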
| 11
|
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A = ["image_processor", "tokenizer"]
A = "OwlViTImageProcessor"
A = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__(self , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase ) -> str:
__UpperCamelCase : Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , _UpperCAmelCase , )
__UpperCamelCase : str = kwargs.pop("feature_extractor" )
__UpperCamelCase : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__(self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="max_length" , _UpperCAmelCase="np" , **_UpperCAmelCase ) -> str:
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) or (isinstance(_UpperCAmelCase , _UpperCAmelCase ) and not isinstance(text[0] , _UpperCAmelCase )):
__UpperCamelCase : Tuple = [self.tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )]
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(text[0] , _UpperCAmelCase ):
__UpperCamelCase : List[str] = []
# Maximum number of queries across batch
__UpperCamelCase : List[str] = max([len(_UpperCAmelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(_UpperCAmelCase ) != max_num_queries:
__UpperCamelCase : Any = t + [" "] * (max_num_queries - len(_UpperCAmelCase ))
__UpperCamelCase : int = self.tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
encodings.append(_UpperCAmelCase )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
__UpperCamelCase : List[str] = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
__UpperCamelCase : int = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__UpperCamelCase : Tuple = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
__UpperCamelCase : Optional[Any] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__UpperCamelCase : Any = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
__UpperCamelCase : List[Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__UpperCamelCase : Any = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
__UpperCamelCase : Optional[Any] = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
__UpperCamelCase : Optional[Any] = BatchEncoding()
__UpperCamelCase : Union[str, Any] = input_ids
__UpperCamelCase : List[str] = attention_mask
if query_images is not None:
__UpperCamelCase : str = BatchEncoding()
__UpperCamelCase : Any = self.image_processor(
_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ).pixel_values
__UpperCamelCase : List[Any] = query_pixel_values
if images is not None:
__UpperCamelCase : Dict = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if text is not None and images is not None:
__UpperCamelCase : Optional[Any] = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__UpperCamelCase : Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]:
return self.image_processor.post_process(*_UpperCAmelCase , **_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> List[str]:
return self.image_processor.post_process_object_detection(*_UpperCAmelCase , **_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]:
return self.image_processor.post_process_image_guided_detection(*_UpperCAmelCase , **_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> int:
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def a_ (self ) -> Tuple:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _UpperCAmelCase , )
return self.image_processor_class
@property
def a_ (self ) -> Union[str, Any]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _UpperCAmelCase , )
return self.image_processor
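# The query-padding logic in __call__ above, isolated: every sample in a batch
# of nested text queries is padded with blank strings up to the longest
# sample, so the tokenizer receives a rectangular batch. The helper name is
# illustrative.
def pad_text_queries(batch):
    max_num_queries = max(len(queries) for queries in batch)
    return [queries + [" "] * (max_num_queries - len(queries)) for queries in batch]

assert pad_text_queries([["a cat"], ["a dog", "a bird"]]) == [["a cat", " "], ["a dog", "a bird"]]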
| 298
| 0
|
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : int = 'vision-encoder-decoder'
UpperCAmelCase__ : Union[str, Any] = True
def __init__( self: List[str] , **UpperCamelCase_: List[Any] ):
super().__init__(**UpperCamelCase_ )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
                F'A configuration of type {self.model_type} cannot be instantiated because '
                F'both `encoder` and `decoder` sub-configurations must be passed, but only {kwargs} was given' )
__lowerCamelCase = kwargs.pop("""encoder""" )
__lowerCamelCase = encoder_config.pop("""model_type""" )
__lowerCamelCase = kwargs.pop("""decoder""" )
__lowerCamelCase = decoder_config.pop("""model_type""" )
__lowerCamelCase = AutoConfig.for_model(UpperCamelCase_ , **UpperCamelCase_ )
__lowerCamelCase = AutoConfig.for_model(UpperCamelCase_ , **UpperCamelCase_ )
__lowerCamelCase = True
@classmethod
def lowerCAmelCase__ ( cls: Tuple , UpperCamelCase_: PretrainedConfig , UpperCamelCase_: PretrainedConfig , **UpperCamelCase_: Any ):
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
__lowerCamelCase = True
__lowerCamelCase = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = copy.deepcopy(self.__dict__ )
__lowerCamelCase = self.encoder.to_dict()
__lowerCamelCase = self.decoder.to_dict()
__lowerCamelCase = self.__class__.model_type
return output
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Tuple = version.parse('1.11')
@property
def lowerCAmelCase__ ( self: Any ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase__ ( self: Union[str, Any] ):
return 1E-4
@property
def lowerCAmelCase__ ( self: Any ):
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class lowerCamelCase__( __lowerCamelCase):
@property
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = OrderedDict()
__lowerCamelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
__lowerCamelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
__lowerCamelCase = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: "PreTrainedTokenizerBase" , UpperCamelCase_: int = -1 , UpperCamelCase_: int = -1 , UpperCamelCase_: bool = False , UpperCamelCase_: Optional["TensorType"] = None , ):
import torch
__lowerCamelCase = OrderedDict()
__lowerCamelCase = super().generate_dummy_inputs(
UpperCamelCase_ , batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , is_pair=UpperCamelCase_ , framework=UpperCamelCase_ )
__lowerCamelCase, __lowerCamelCase = dummy_input["""input_ids"""].shape
__lowerCamelCase = (batch, encoder_sequence, self._config.encoder_hidden_size)
__lowerCamelCase = dummy_input.pop("""input_ids""" )
__lowerCamelCase = dummy_input.pop("""attention_mask""" )
__lowerCamelCase = torch.zeros(UpperCamelCase_ )
return common_inputs
class lowerCamelCase__( __lowerCamelCase):
@property
def lowerCAmelCase__ ( self: Union[str, Any] ):
pass
def lowerCAmelCase__ ( self: str , UpperCamelCase_: PretrainedConfig ):
return VisionEncoderDecoderEncoderOnnxConfig(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: PretrainedConfig , UpperCamelCase_: PretrainedConfig , UpperCamelCase_: str = "default" ):
__lowerCamelCase = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(UpperCamelCase_ , UpperCamelCase_ )
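# Usage sketch for the composite config above, assuming it mirrors the stock
# transformers VisionEncoderDecoderConfig API; the ViT/BERT pairing is an
# arbitrary example, not anything this file mandates.
from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig

composite = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), BertConfig())
assert composite.decoder.is_decoder and composite.decoder.add_cross_attention
# to_dict() re-embeds both sub-configs, so the pairing survives save/reload:
assert composite.to_dict()["encoder"]["model_type"] == "vit"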
| 12
|
'''simple docstring'''
def __lowerCAmelCase ( snake_case__ ):
return "".join([hex(snake_case__ )[2:].zfill(2 ).upper() for byte in list(snake_case__ )] )
def __lowerCAmelCase ( snake_case__ ):
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(snake_case__ ) % 2) != 0:
raise ValueError(
"Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(snake_case__ ) <= set("0123456789ABCDEF" ):
raise ValueError(
"Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(snake_case__ ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
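# Round-trip demo of the base16 logic above (the obfuscated function names
# collide, so both directions are re-derived inline); the stdlib
# bytes.fromhex should agree with the stricter decoder defined here.
payload = b"\x01\xabHELLO"
encoded = "".join(f"{byte:02X}" for byte in payload)
decoded = bytes(int(encoded[i : i + 2], 16) for i in range(0, len(encoded), 2))
assert decoded == payload == bytes.fromhex(encoded)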
| 298
| 0
|
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: List[Any] = generate_pascal_triangle(_UpperCAmelCase )
for row_idx in range(_UpperCAmelCase ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=" " )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=" " )
else:
print(triangle[row_idx][col_idx] , end="" )
print()
def A_ ( _UpperCAmelCase ):
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
SCREAMING_SNAKE_CASE_: list[list[int]] = []
for current_row_idx in range(_UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: List[Any] = populate_current_row(_UpperCAmelCase , _UpperCAmelCase )
triangle.append(_UpperCAmelCase )
return triangle
def A_ ( _UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Dict = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = 1, 1
for current_col_idx in range(1 , _UpperCAmelCase ):
calculate_current_element(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return current_row
def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
SCREAMING_SNAKE_CASE_: str = triangle[current_row_idx - 1][current_col_idx - 1]
SCREAMING_SNAKE_CASE_: Optional[int] = triangle[current_row_idx - 1][current_col_idx]
SCREAMING_SNAKE_CASE_: str = above_to_left_elt + above_to_right_elt
def A_ ( _UpperCAmelCase ):
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
SCREAMING_SNAKE_CASE_: list[list[int]] = [[1]]
for row_index in range(1 , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Any = [0] + result[-1] + [0]
SCREAMING_SNAKE_CASE_: Tuple = row_index + 1
# Calculate the number of distinct elements in a row
SCREAMING_SNAKE_CASE_: Any = sum(divmod(_UpperCAmelCase , 2 ) )
SCREAMING_SNAKE_CASE_: Optional[int] = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
SCREAMING_SNAKE_CASE_: Tuple = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
SCREAMING_SNAKE_CASE_: List[str] = row_first_half + row_second_half
result.append(_UpperCAmelCase )
return result
def A_ ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_UpperCAmelCase , _UpperCAmelCase ) -> None:
SCREAMING_SNAKE_CASE_: int = f"{func.__name__}({value})"
SCREAMING_SNAKE_CASE_: List[str] = timeit(f"__main__.{call}" , setup="import __main__" )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(f"{call:38} -- {timing:.4f} seconds" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(_UpperCAmelCase , _UpperCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
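# Independent oracle for the two generators above: the classic zip-based
# construction of Pascal's triangle, handy as a cross-check in tests.
def pascal_reference(num_rows: int) -> list:
    rows, row = [], [1]
    for _ in range(num_rows):
        rows.append(row)
        row = [a + b for a, b in zip([0] + row, row + [0])]
    return rows

assert pascal_reference(5)[-1] == [1, 4, 6, 4, 1]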
| 13
|
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
_lowerCAmelCase = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
_lowerCAmelCase = logging.getLogger()
def __lowerCAmelCase ( ):
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument("-f" )
__UpperCamelCase : Optional[Any] = parser.parse_args()
return args.f
def __lowerCAmelCase ( snake_case__ , snake_case__="eval" ):
__UpperCamelCase : List[str] = os.path.join(snake_case__ , F"{split}_results.json" )
if os.path.exists(snake_case__ ):
with open(snake_case__ , "r" ) as f:
return json.load(snake_case__ )
raise ValueError(F"can't find {path}" )
_lowerCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def a_ (self ) -> str:
__UpperCamelCase : Any = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[str] = f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_flax_glue.main()
__UpperCamelCase : int = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
@slow
def a_ (self ) -> Tuple:
__UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Any = f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_clm_flax.main()
__UpperCamelCase : Optional[int] = get_results(_UpperCAmelCase )
self.assertLess(result["eval_perplexity"] , 1_0_0 )
@slow
def a_ (self ) -> str:
__UpperCamelCase : Any = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Tuple = f"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_summarization_flax.main()
__UpperCamelCase : Tuple = get_results(_UpperCAmelCase , split="test" )
self.assertGreaterEqual(result["test_rouge1"] , 1_0 )
self.assertGreaterEqual(result["test_rouge2"] , 2 )
self.assertGreaterEqual(result["test_rougeL"] , 7 )
self.assertGreaterEqual(result["test_rougeLsum"] , 7 )
@slow
def a_ (self ) -> int:
__UpperCamelCase : int = self.get_auto_remove_tmp_dir()
__UpperCamelCase : str = f"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_mlm_flax.main()
__UpperCamelCase : Optional[Any] = get_results(_UpperCAmelCase )
self.assertLess(result["eval_perplexity"] , 4_2 )
@slow
def a_ (self ) -> Dict:
__UpperCamelCase : Dict = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Tuple = f"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_ta_mlm_flax.main()
__UpperCamelCase : Tuple = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.42 )
@slow
def a_ (self ) -> Union[str, Any]:
        # with so little data, distributed training needs more epochs to reach a score on par with 0/1 GPU runs
__UpperCamelCase : Union[str, Any] = 7 if get_gpu_count() > 1 else 2
__UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Optional[Any] = f"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_flax_ner.main()
__UpperCamelCase : int = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertGreaterEqual(result["eval_f1"] , 0.3 )
@slow
def a_ (self ) -> List[Any]:
__UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Dict = f"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_qa.main()
__UpperCamelCase : List[Any] = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_f1"] , 3_0 )
self.assertGreaterEqual(result["eval_exact"] , 3_0 )
| 298
| 0
|
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = BarthezTokenizer
UpperCAmelCase__ = BarthezTokenizerFast
UpperCAmelCase__ = True
UpperCAmelCase__ = True
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Dict:
'''simple docstring'''
super().setUp()
A__ = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''')
tokenizer.save_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname , legacy_format=UpperCAmelCase__)
A__ = tokenizer
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
A__ = '''<pad>'''
A__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__) , UpperCAmelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__) , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Tuple:
'''simple docstring'''
A__ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<s>''')
self.assertEqual(vocab_keys[1] , '''<pad>''')
self.assertEqual(vocab_keys[-1] , '''<mask>''')
self.assertEqual(len(UpperCAmelCase__) , 101_122)
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 101_122)
@require_torch
def SCREAMING_SNAKE_CASE ( self : List[str]) ->str:
'''simple docstring'''
A__ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
A__ = [0, 57, 3_018, 70_307, 91, 2]
A__ = self.tokenizer(
UpperCAmelCase__ , max_length=len(UpperCAmelCase__) , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , return_tensors='''pt''')
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__)
self.assertEqual((2, 6) , batch.input_ids.shape)
self.assertEqual((2, 6) , batch.attention_mask.shape)
A__ = batch.input_ids.tolist()[0]
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
A__ = self.get_tokenizer()
A__ = self.get_rust_tokenizer()
A__ = '''I was born in 92000, and this is falsé.'''
A__ = tokenizer.tokenize(UpperCAmelCase__)
A__ = rust_tokenizer.tokenize(UpperCAmelCase__)
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__)
A__ = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__)
A__ = rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__)
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__)
A__ = self.get_rust_tokenizer()
A__ = tokenizer.encode(UpperCAmelCase__)
A__ = rust_tokenizer.encode(UpperCAmelCase__)
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__)
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->int:
'''simple docstring'''
A__ = {'''input_ids''': [[0, 490, 14_328, 4_507, 354, 47, 43_669, 95, 25, 78_117, 20_215, 19_779, 190, 22, 400, 4, 35_343, 80_310, 603, 86, 24_937, 105, 33_438, 94_762, 196, 39_642, 7, 15, 15_933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10_534, 87, 25, 66, 3_358, 196, 55_289, 8, 82_961, 81, 2_204, 75_203, 7, 15, 763, 12_956, 216, 178, 14_328, 9_595, 1_377, 69_693, 7, 448, 71_021, 196, 18_106, 1_437, 13_974, 108, 9_083, 4, 49_315, 7, 39, 86, 1_326, 2_793, 46_333, 4, 448, 196, 74_588, 7, 49_315, 7, 39, 21, 822, 38_470, 74, 21, 66_723, 62_480, 8, 22_050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
A__ = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=UpperCAmelCase__ , )
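# The slow/fast parity pattern the tests above exercise, in isolation:
# tokenize the same string with both implementations and require identical
# output. This sketch downloads the same checkpoint the tests load.
from transformers import BarthezTokenizer, BarthezTokenizerFast

slow = BarthezTokenizer.from_pretrained("moussaKam/mbarthez")
fast = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
sample = "I was born in 92000, and this is falsé."
assert slow.tokenize(sample) == fast.tokenize(sample)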
| 14
|
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class A :
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase=9_9 , _UpperCAmelCase=1_3 , _UpperCAmelCase=1_6 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=2 , _UpperCAmelCase=3_2 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase=3_0 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=None , ) -> int:
__UpperCamelCase : List[str] = parent
__UpperCamelCase : str = batch_size
__UpperCamelCase : str = decoder_seq_length
# For common tests
__UpperCamelCase : Optional[int] = self.decoder_seq_length
__UpperCamelCase : Any = is_training
__UpperCamelCase : Tuple = use_attention_mask
__UpperCamelCase : Optional[int] = use_labels
__UpperCamelCase : Dict = vocab_size
__UpperCamelCase : Optional[int] = d_model
__UpperCamelCase : Union[str, Any] = d_model
__UpperCamelCase : int = decoder_layers
__UpperCamelCase : Dict = decoder_layers
__UpperCamelCase : str = decoder_ffn_dim
__UpperCamelCase : Optional[Any] = decoder_attention_heads
__UpperCamelCase : Optional[Any] = decoder_attention_heads
__UpperCamelCase : List[Any] = eos_token_id
__UpperCamelCase : int = bos_token_id
__UpperCamelCase : Tuple = pad_token_id
__UpperCamelCase : Tuple = decoder_start_token_id
__UpperCamelCase : Dict = use_cache
__UpperCamelCase : Optional[Any] = max_position_embeddings
__UpperCamelCase : int = None
__UpperCamelCase : Optional[int] = decoder_seq_length
__UpperCamelCase : Optional[int] = 2
__UpperCamelCase : Optional[int] = 1
def a_ (self ) -> List[Any]:
__UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
__UpperCamelCase : int = None
if self.use_attention_mask:
__UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
__UpperCamelCase : List[str] = None
if self.use_labels:
__UpperCamelCase : int = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
__UpperCamelCase : Optional[Any] = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> Optional[Any]:
__UpperCamelCase : List[Any] = True
__UpperCamelCase : Optional[Any] = TrOCRDecoder(config=_UpperCAmelCase ).to(_UpperCAmelCase ).eval()
__UpperCamelCase : Optional[Any] = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
__UpperCamelCase : str = model(_UpperCAmelCase , use_cache=_UpperCAmelCase )
__UpperCamelCase : List[Any] = model(_UpperCAmelCase )
__UpperCamelCase : Optional[int] = model(_UpperCAmelCase , use_cache=_UpperCAmelCase )
self.parent.assertTrue(len(_UpperCAmelCase ) == len(_UpperCAmelCase ) )
self.parent.assertTrue(len(_UpperCAmelCase ) == len(_UpperCAmelCase ) + 1 )
__UpperCamelCase : List[Any] = outputs["past_key_values"]
        # create hypothetical next token and extend to next_input_ids
__UpperCamelCase : Optional[int] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append the new token to input_ids
__UpperCamelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 )
__UpperCamelCase : Tuple = model(_UpperCAmelCase )["last_hidden_state"]
__UpperCamelCase : Any = model(_UpperCAmelCase , past_key_values=_UpperCAmelCase )["last_hidden_state"]
# select random slice
__UpperCamelCase : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__UpperCamelCase : Dict = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
__UpperCamelCase : Optional[int] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 )
def a_ (self ) -> Optional[Any]:
__UpperCamelCase : List[str] = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Any = config_and_inputs
__UpperCamelCase : str = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
A = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
A = (TrOCRForCausalLM,) if is_torch_available() else ()
A = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
A = True
A = False
def a_ (self ) -> List[str]:
__UpperCamelCase : Optional[int] = TrOCRStandaloneDecoderModelTester(self , is_training=_UpperCAmelCase )
__UpperCamelCase : Dict = ConfigTester(self , config_class=_UpperCAmelCase )
def a_ (self ) -> Dict:
pass
def a_ (self ) -> Optional[int]:
pass
def a_ (self ) -> Optional[Any]:
pass
def a_ (self ) -> Dict:
self.config_tester.run_common_tests()
def a_ (self ) -> List[Any]:
__UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*_UpperCAmelCase )
def a_ (self ) -> Any:
return
@unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :)
def a_ (self ) -> Tuple:
pass
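# Generic restatement of the cache check above, assuming any Hugging Face
# causal LM: decoding the last token with past_key_values must match decoding
# the full sequence at once. The tiny checkpoint below is an arbitrary test
# model, not one this file uses.
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("sshleifer/tiny-gpt2").eval()
ids = torch.tensor([[10, 20, 30]])
past = model(ids[:, :-1], use_cache=True).past_key_values
cached_logits = model(ids[:, -1:], past_key_values=past).logits
full_logits = model(ids).logits[:, -1:]
assert torch.allclose(cached_logits, full_logits, atol=1e-4)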
| 298
| 0
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE :Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :Any = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = "mvp"
snake_case_ = ["past_key_values"]
snake_case_ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : str ,A : Optional[Any]=5_02_67 ,A : int=10_24 ,A : List[Any]=12 ,A : Any=40_96 ,A : Dict=16 ,A : Any=12 ,A : Optional[int]=40_96 ,A : Optional[int]=16 ,A : List[Any]=0.0 ,A : List[Any]=0.0 ,A : Optional[Any]="gelu" ,A : int=10_24 ,A : int=0.1 ,A : Tuple=0.0 ,A : Optional[Any]=0.0 ,A : Optional[Any]=0.02 ,A : str=0.0 ,A : Any=False ,A : Optional[Any]=True ,A : str=1 ,A : Optional[Any]=0 ,A : Optional[Any]=2 ,A : List[Any]=True ,A : int=2 ,A : str=2 ,A : List[Any]=False ,A : str=1_00 ,A : Any=8_00 ,**A : str ,):
__A = vocab_size
__A = max_position_embeddings
__A = d_model
__A = encoder_ffn_dim
__A = encoder_layers
__A = encoder_attention_heads
__A = decoder_ffn_dim
__A = decoder_layers
__A = decoder_attention_heads
__A = dropout
__A = attention_dropout
__A = activation_dropout
__A = activation_function
__A = init_std
__A = encoder_layerdrop
__A = decoder_layerdrop
__A = classifier_dropout
__A = use_cache
__A = encoder_layers
__A = scale_embedding # scale factor will be sqrt(d_model) if True
__A = use_prompt
__A = prompt_length
__A = prompt_mid_dim
super().__init__(
pad_token_id=A ,bos_token_id=A ,eos_token_id=A ,is_encoder_decoder=A ,decoder_start_token_id=A ,forced_eos_token_id=A ,**A ,)
if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" ,A ):
__A = self.bos_token_id
warnings.warn(
f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
"The config can simply be saved and uploaded again to be fixed." )
| 15
|
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = '''Hello, World!'''
_lowerCAmelCase = '''en_XX'''
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
__UpperCamelCase : Union[str, Any] = Path("data_bin" )
__UpperCamelCase : Union[str, Any] = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(snake_case__ ).parent ) , checkpoint_file=Path(snake_case__ ).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(snake_case__ ) , bpe="sentencepiece" , sentencepiece_model=str(Path(snake_case__ ).parent / "sentencepiece.bpe.model" ) , src_dict=str(data_dir / "dict.txt" ) , )
xmod.eval() # disable dropout
print(snake_case__ )
__UpperCamelCase : List[str] = xmod.model.encoder.sentence_encoder
__UpperCamelCase : Optional[int] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
__UpperCamelCase : Any = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our X-MOD config:" , snake_case__ )
__UpperCamelCase : Dict = XmodForSequenceClassification(snake_case__ ) if classification_head else XmodForMaskedLM(snake_case__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
__UpperCamelCase : List[Any] = xmod_sent_encoder.embed_tokens.weight
__UpperCamelCase : List[Any] = xmod_sent_encoder.embed_positions.weight
__UpperCamelCase : str = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out because X-MOD doesn't use them.
__UpperCamelCase : Any = xmod_sent_encoder.layernorm_embedding.weight
__UpperCamelCase : str = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__UpperCamelCase : int = model.roberta.encoder.layer[i]
__UpperCamelCase : Any = xmod_sent_encoder.layers[i]
# self attention
__UpperCamelCase : List[str] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("Dimensions of self-attention weights do not match." )
__UpperCamelCase : Dict = xmod_layer.self_attn.q_proj.weight
__UpperCamelCase : Optional[Any] = xmod_layer.self_attn.q_proj.bias
__UpperCamelCase : Any = xmod_layer.self_attn.k_proj.weight
__UpperCamelCase : Tuple = xmod_layer.self_attn.k_proj.bias
__UpperCamelCase : Union[str, Any] = xmod_layer.self_attn.v_proj.weight
__UpperCamelCase : Any = xmod_layer.self_attn.v_proj.bias
# self-attention output
__UpperCamelCase : Optional[int] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("Dimensions of self-attention output weights do not match." )
__UpperCamelCase : Union[str, Any] = xmod_layer.self_attn.out_proj.weight
__UpperCamelCase : str = xmod_layer.self_attn.out_proj.bias
__UpperCamelCase : Dict = xmod_layer.self_attn_layer_norm.weight
__UpperCamelCase : Any = xmod_layer.self_attn_layer_norm.bias
# intermediate
__UpperCamelCase : Dict = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of intermediate weights do not match." )
__UpperCamelCase : List[Any] = xmod_layer.fca.weight
__UpperCamelCase : Optional[int] = xmod_layer.fca.bias
# output
__UpperCamelCase : List[Any] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of feed-forward weights do not match." )
__UpperCamelCase : Tuple = xmod_layer.fca.weight
__UpperCamelCase : int = xmod_layer.fca.bias
__UpperCamelCase : Dict = xmod_layer.final_layer_norm.weight
__UpperCamelCase : int = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
__UpperCamelCase : Any = xmod_layer.adapter_layer_norm.weight
__UpperCamelCase : int = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("Lists of language adapters do not match." )
for lang_code, adapter in xmod_layer.adapter_modules.items():
__UpperCamelCase : Any = bert_output.adapter_modules[lang_code]
__UpperCamelCase : Dict = xmod_layer.adapter_modules[lang_code]
__UpperCamelCase : int = from_adapter.fca.weight
__UpperCamelCase : Dict = from_adapter.fca.bias
__UpperCamelCase : List[Any] = from_adapter.fca.weight
__UpperCamelCase : int = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
__UpperCamelCase : Tuple = xmod_sent_encoder.layer_norm.weight
__UpperCamelCase : List[Any] = xmod_sent_encoder.layer_norm.bias
if classification_head:
__UpperCamelCase : Optional[Any] = xmod.model.classification_heads["mnli"].dense.weight
__UpperCamelCase : Any = xmod.model.classification_heads["mnli"].dense.bias
__UpperCamelCase : Tuple = xmod.model.classification_heads["mnli"].out_proj.weight
__UpperCamelCase : List[Any] = xmod.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
__UpperCamelCase : Any = xmod.model.encoder.lm_head.dense.weight
__UpperCamelCase : Optional[Any] = xmod.model.encoder.lm_head.dense.bias
__UpperCamelCase : Tuple = xmod.model.encoder.lm_head.layer_norm.weight
__UpperCamelCase : List[Any] = xmod.model.encoder.lm_head.layer_norm.bias
__UpperCamelCase : Tuple = xmod.model.encoder.lm_head.weight
__UpperCamelCase : Any = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
__UpperCamelCase : Any = xmod.encode(snake_case__ ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(snake_case__ )
__UpperCamelCase : Optional[Any] = model(snake_case__ )[0]
if classification_head:
__UpperCamelCase : int = xmod.model.classification_heads["mnli"](xmod.extract_features(snake_case__ ) )
else:
__UpperCamelCase : Optional[Any] = xmod.model(snake_case__ , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
__UpperCamelCase : Dict = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7
__UpperCamelCase : Union[str, Any] = torch.allclose(snake_case__ , snake_case__ , atol=1E-3 )
print("Do both models output the same tensors?" , "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
Path(snake_case__ ).mkdir(parents=snake_case__ , exist_ok=snake_case__ )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
_lowerCAmelCase = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
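# The conversion above repeats one motif: copy a tensor only after verifying
# that the shapes line up. A small helper capturing it (hypothetical, not part
# of the script):
import torch

def copy_param(dst: torch.nn.Parameter, src: torch.Tensor, name: str) -> None:
    if dst.shape != src.shape:
        raise AssertionError(f"Shape mismatch for {name}: {tuple(dst.shape)} vs {tuple(src.shape)}")
    with torch.no_grad():
        dst.copy_(src)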
| 298
| 0
|
"""simple docstring"""
print((lambda quine: quine % quine)('print((lambda quine: quine %% quine)(%r))'))
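# Why the one-liner above reproduces itself: %r re-embeds the format string,
# repr-quoted, into itself, and %% collapses to a literal % on that first
# formatting pass, so running the printed output prints the same text again.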
| 16
|
'''simple docstring'''
def __lowerCAmelCase ( snake_case__ ):
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(snake_case__ ) )
if txt[a].isalpha()
]
if __name__ == "__main__":
__import__('''doctest''').testmod()
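# Self-contained restatement of the helper above with a readable name, plus
# the output it should produce:
def alpha_capitalised_variants(txt: str) -> list:
    return [txt[:i] + txt[i].upper() + txt[i + 1 :] for i in range(len(txt)) if txt[i].isalpha()]

assert alpha_capitalised_variants("a1b") == ["A1b", "a1B"]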
| 298
| 0
|
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
_a = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : int, UpperCAmelCase__ : Any, UpperCAmelCase__ : str=7, UpperCAmelCase__ : int=3, UpperCAmelCase__ : List[Any]=1_8, UpperCAmelCase__ : Optional[Any]=3_0, UpperCAmelCase__ : Optional[int]=4_0_0, UpperCAmelCase__ : Optional[Any]=None, UpperCAmelCase__ : Union[str, Any]=True, UpperCAmelCase__ : Dict=True, UpperCAmelCase__ : List[str]=None, ):
__lowercase = size if size is not None else {"height": 2_0, "width": 2_0}
__lowercase = parent
__lowercase = batch_size
__lowercase = num_channels
__lowercase = image_size
__lowercase = min_resolution
__lowercase = max_resolution
__lowercase = size
__lowercase = do_normalize
__lowercase = do_convert_rgb
__lowercase = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6]
__lowercase = patch_size if patch_size is not None else {"height": 1_6, "width": 1_6}
def _lowercase ( self : Tuple ):
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def _lowercase ( self : Any ):
__lowercase = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
__lowercase = Image.open(requests.get(UpperCAmelCase__, stream=UpperCAmelCase__ ).raw ).convert("RGB" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 ,reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." ,)
@require_torch
@require_vision
class _lowerCAmelCase ( lowercase ,unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = PixaStructImageProcessor if is_vision_available() else None
def _lowercase ( self : Union[str, Any] ):
__lowercase = PixaStructImageProcessingTester(self )
@property
def _lowercase ( self : Dict ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self : int ):
__lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase__, "do_normalize" ) )
self.assertTrue(hasattr(UpperCAmelCase__, "do_convert_rgb" ) )
def _lowercase ( self : int ):
__lowercase = self.image_processor_tester.prepare_dummy_image()
__lowercase = self.image_processing_class(**self.image_processor_dict )
__lowercase = 2_0_4_8
__lowercase = image_processor(UpperCAmelCase__, return_tensors="pt", max_patches=UpperCAmelCase__ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0_606 ), atol=1E-3, rtol=1E-3 ) )
def _lowercase ( self : Dict ):
# Initialize image_processor
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__, Image.Image )
# Test not batched input
__lowercase = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowercase = image_processor(
image_inputs[0], return_tensors="pt", max_patches=UpperCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
__lowercase = image_processor(
UpperCAmelCase__, return_tensors="pt", max_patches=UpperCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
def _lowercase ( self : Tuple ):
# Initialize image_processor
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__, Image.Image )
# Test not batched input
__lowercase = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
__lowercase = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(UpperCAmelCase__ ):
__lowercase = image_processor(
image_inputs[0], return_tensors="pt", max_patches=UpperCAmelCase__ ).flattened_patches
__lowercase = "Hello"
__lowercase = image_processor(
image_inputs[0], return_tensors="pt", max_patches=UpperCAmelCase__, header_text=UpperCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
__lowercase = image_processor(
UpperCAmelCase__, return_tensors="pt", max_patches=UpperCAmelCase__, header_text=UpperCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
def _lowercase ( self : Any ):
# Initialize image_processor
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCAmelCase__, numpify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__, np.ndarray )
__lowercase = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowercase = image_processor(
image_inputs[0], return_tensors="pt", max_patches=UpperCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
__lowercase = image_processor(
UpperCAmelCase__, return_tensors="pt", max_patches=UpperCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
def _lowercase ( self : Dict ):
# Initialize image_processor
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCAmelCase__, torchify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__, torch.Tensor )
# Test not batched input
__lowercase = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowercase = image_processor(
image_inputs[0], return_tensors="pt", max_patches=UpperCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
__lowercase = image_processor(
UpperCAmelCase__, return_tensors="pt", max_patches=UpperCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 ,reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." ,)
@require_torch
@require_vision
class _lowerCAmelCase ( lowercase ,unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = PixaStructImageProcessor if is_vision_available() else None
def _lowercase ( self : str ):
__lowercase = PixaStructImageProcessingTester(self, num_channels=4 )
__lowercase = 3
@property
def _lowercase ( self : Optional[int] ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self : str ):
__lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase__, "do_normalize" ) )
self.assertTrue(hasattr(UpperCAmelCase__, "do_convert_rgb" ) )
def _lowercase ( self : List[Any] ):
# Initialize image_processor
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__, Image.Image )
# Test not batched input
__lowercase = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowercase = image_processor(
image_inputs[0], return_tensors="pt", max_patches=UpperCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
__lowercase = image_processor(
UpperCAmelCase__, return_tensors="pt", max_patches=UpperCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
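# Where the expected_hidden_dim computed throughout these tests comes from:
# Pix2Struct flattens each patch into patch_height * patch_width * channels
# pixel values and prepends two positional coordinates (row id, column id).
patch_h, patch_w, channels = 16, 16, 3
assert patch_h * patch_w * channels + 2 == 770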
| 17
|
'''simple docstring'''
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
def count_of_possible_combinations(snake_case__ ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(snake_case__ )
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
def count_of_possible_combinations_with_dp_array(
snake_case__ , snake_case__ ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
__UpperCamelCase : Any = sum(
count_of_possible_combinations_with_dp_array(target - item , snake_case__ )
for item in array )
__UpperCamelCase : List[str] = answer
return answer
__UpperCamelCase : Optional[int] = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(snake_case__ , snake_case__ )
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
__UpperCamelCase : Optional[int] = [0] * (target + 1)
__UpperCamelCase : Tuple = 1
for i in range(1 , target + 1 ):
for j in range(snake_case__ ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase = 3
_lowerCAmelCase = 5
_lowerCAmelCase = [1, 2, 5]
print(combination_sum_iv(n, array, target))
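# Brute-force oracle for the three equivalent counters above: enumerate
# ordered compositions directly. For array [1, 2, 5] and target 5 every
# variant should report 9 (5; 1+2+2 in 3 orders; 1+1+1+2 in 4 orders;
# 1+1+1+1+1). The bottom-up table runs in O(target * n) time, O(target) space.
from itertools import product

def brute_force_count(array, target):
    return sum(
        1
        for length in range(1, target + 1)
        for combo in product(array, repeat=length)
        if sum(combo) == target
    )

assert brute_force_count([1, 2, 5], 5) == 9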
| 298
| 0
|
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """
    Approximate sin(theta) with the first `accuracy` terms of the Maclaurin
    series: sin(x) = sum((-1)^r * x^(2r+1) / (2r+1)! for r >= 0)
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")
    theta = float(theta)
    # Reduce theta toward [-2*pi, 2*pi] so the truncated series stays accurate
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """
    Approximate cos(theta) with the first `accuracy` terms of the Maclaurin
    series: cos(x) = sum((-1)^r * x^(2r) / (2r)! for r >= 0)
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
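# Expected values (added; with the angle reduced below 2*pi, 30 terms converge
# essentially to machine precision):
#   maclaurin_sin(10) -> -0.544021...
#   maclaurin_cos(5)  ->  0.283662...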
| 18
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
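# Example (the report id is an arbitrary string you choose): running
#   python -m pytest tests --make-reports=tests_torch
# makes the pytest_terminal_summary hook above write report files for the run.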
| 298
| 0
|
import os
def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product

    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)


if __name__ == "__main__":
    print(solution())
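# With grid.txt holding the standard 20x20 grid from Project Euler problem 11,
# solution() returns 70600674.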
| 19
|
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, do_resize=True, size=None, size_divisor=32, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, do_center_crop=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], do_pad=True, batch_size=7, min_resolution=30, max_resolution=400, num_channels=3):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Compute the expected height and width of images run through the
        processor: resize the shortest edge to `size`, cap the longest edge,
        then round both sides down to a multiple of `size_divisor`.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
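# Worked example for get_expected_values (illustrative numbers): a 60x120
# image with size=288 scales to 288x576; the long side is capped at
# int(1333 / 800 * 288) = 479, rescaling to 240x479; flooring both sides to
# multiples of size_divisor=32 gives an expected 224x448.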
| 298
| 0
|
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """
    Doolittle LU decomposition: factor a square matrix `table` into a unit
    lower-triangular matrix and an upper-triangular matrix (no pivoting).
    """
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper


if __name__ == "__main__":
    import doctest

    doctest.testmod()
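# Quick check (added): for A = [[2, -2, 1], [0, 1, 2], [5, 3, 1]],
# lower_upper_decomposition(A) yields
#   L = [[1, 0, 0], [0, 1, 0], [2.5, 8, 1]]
#   U = [[2, -2, 1], [0, 1, 2], [0, 0, -17.5]]
# and np.allclose(L @ U, A) holds.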
| 20
|
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """
    Convert the original Bort checkpoint (GluonNLP/MXNet) to a PyTorch checkpoint.
    """

    # Original Bort configuration
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()

    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )

    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )

    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both models output the same tensors")
    else:
        print("❌ The models do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
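# Example invocation (the script and file names here are illustrative):
#   python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path ./bort.params \
#       --pytorch_dump_folder_path ./bort-pytorch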
| 298
| 0
|
from ..utils import DummyObject, requires_backends
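# Placeholder pattern: each class below stands in for a torch-backed symbol
# when torch is not installed. The DummyObject metaclass (imported above)
# makes any attribute access call requires_backends, which raises an
# ImportError explaining that torch is needed.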
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Optional[int] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : List[Any] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> str:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : List[Any] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Any = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Tuple = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> List[str]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : List[Any] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : List[str] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : str = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : int = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Optional[int] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> int:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Optional[int] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(cls, ['torch'])
def UpperCamelCase_(*args, **kwargs):
    requires_backends(UpperCamelCase_, ["torch"])


def UpperCamelCase_(*args, **kwargs):
    requires_backends(UpperCamelCase_, ["torch"])


def UpperCamelCase_(*args, **kwargs):
    requires_backends(UpperCamelCase_, ["torch"])


def UpperCamelCase_(*args, **kwargs):
    requires_backends(UpperCamelCase_, ["torch"])


def UpperCamelCase_(*args, **kwargs):
    requires_backends(UpperCamelCase_, ["torch"])


def UpperCamelCase_(*args, **kwargs):
    requires_backends(UpperCamelCase_, ["torch"])


def UpperCamelCase_(*args, **kwargs):
    requires_backends(UpperCamelCase_, ["torch"])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Union[str, Any] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> int:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Optional[Any] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : int = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Union[str, Any] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> List[str]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Dict = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> str:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : str = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> str:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : int = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Optional[Any] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Optional[int] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : int = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Optional[int] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : List[Any] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> int:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Dict = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Union[str, Any] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> str:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Optional[Any] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> int:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> str:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Union[str, Any] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> str:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> str:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Any = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> List[str]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> str:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Any = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> str:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : int = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> int:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Optional[int] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> int:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Optional[int] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Tuple = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> str:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : str = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : List[str] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : str = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> List[str]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Union[str, Any] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> str:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : int = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> str:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Dict = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> int:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Dict = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Any = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : int = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : int = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> int:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : List[str] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : List[Any] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> str:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : List[Any] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> int:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : List[Any] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Dict = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Optional[Any] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : List[str] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> List[str]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(cls, ['torch'])
| 21
|
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
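# These tests are skipped unless apache-beam is available; DirectRunner runs
# the pipeline in-process, so (install spelling assumed) something like
#   pip install apache-beam && pytest tests/test_beam.py -q
# exercises them locally.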
| 298
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__SCREAMING_SNAKE_CASE :Optional[int] = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], crop_pct: float, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            # (e.g. shortest_edge=224 with crop_pct=0.875 resizes to 256, then center-crops to 224)
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(snake_case_ , default_to_square=snake_case_ )
_UpperCAmelCase = make_list_of_images(snake_case_ )
if not valid_images(snake_case_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
raise ValueError("crop_pct must be specified if size < 384." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(snake_case_ ) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=snake_case_ , size=snake_case_ , crop_pct=snake_case_ , resample=snake_case_ ) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=snake_case_ , scale=snake_case_ ) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=snake_case_ , mean=snake_case_ , std=snake_case_ ) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(snake_case_ , snake_case_ ) for image in images]
_UpperCAmelCase = {"pixel_values": images}
return BatchFeature(data=snake_case_ , tensor_type=snake_case_ )
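# Usage sketch (illustrative): this file mirrors the ConvNeXt-style image processor from
# transformers. Assuming the public `ConvNextImageProcessor` entry point behaves like the
# class above, preprocessing a synthetic image looks like this; shapes are indicative only.
import numpy as np
from transformers import ConvNextImageProcessor

image = np.random.randint(0, 256, size=(500, 400, 3), dtype=np.uint8)  # HWC uint8 image
processor = ConvNextImageProcessor(size={"shortest_edge": 384}, crop_pct=224 / 256)
batch = processor(image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 384, 384): shortest_edge >= 384 -> plain warp resize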
| 22
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __lowerCAmelCase ( snake_case__=None ):
if subparsers is not None:
__UpperCamelCase : Any = subparsers.add_parser("test" )
else:
__UpperCamelCase : Dict = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" , default=snake_case__ , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=snake_case__ )
return parser
def __lowerCAmelCase ( snake_case__ ):
__UpperCamelCase : str = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
__UpperCamelCase : str = script_name
else:
__UpperCamelCase : Tuple = F"--config_file={args.config_file} {script_name}"
__UpperCamelCase : Optional[Any] = ["accelerate-launch"] + test_args.split()
__UpperCamelCase : Optional[Any] = execute_subprocess_async(snake_case__ , env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def __lowerCAmelCase ( ):
__UpperCamelCase : int = test_command_parser()
__UpperCamelCase : Union[str, Any] = parser.parse_args()
test_command(snake_case__ )
if __name__ == "__main__":
main()
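# Example invocation sketch (assuming this module is registered as the `accelerate test`
# subcommand, as the parser above suggests; the config path is a placeholder):
#
#   accelerate test --config_file=/path/to/default_config.yaml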
| 298
| 0
|
'''simple docstring'''
import datasets
UpperCamelCase__: Tuple = "\\n@InProceedings{conneau2018xnli,\n author = \"Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin\",\n title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",\n booktitle = \"Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n location = \"Brussels, Belgium\",\n}\n"
UpperCamelCase__: List[str] = "\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n"
UpperCamelCase__: List[Any] = "\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n 'accuracy': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric(\"xnli\")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n"
def snake_case_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple ) -> List[str]:
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE( datasets.Metric ):
"""simple docstring"""
def A ( self : List[Any] ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
def A ( self : Dict , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] ) -> List[str]:
return {"accuracy": simple_accuracy(__snake_case , __snake_case )}
| 23
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
A = BlenderbotSmallTokenizer
A = False
def a_ (self ) -> List[str]:
super().setUp()
__UpperCamelCase : Optional[Any] = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
__UpperCamelCase : int = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
__UpperCamelCase : Any = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
__UpperCamelCase : int = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
__UpperCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__UpperCamelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(_UpperCAmelCase ) )
def a_ (self , **_UpperCAmelCase ) -> Dict:
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def a_ (self , _UpperCAmelCase ) -> str:
__UpperCamelCase : List[Any] = "adapt act apte"
__UpperCamelCase : Dict = "adapt act apte"
return input_text, output_text
def a_ (self ) -> int:
__UpperCamelCase : List[str] = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__UpperCamelCase : str = "adapt act apte"
__UpperCamelCase : List[str] = ["adapt", "act", "ap@@", "te"]
__UpperCamelCase : Union[str, Any] = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__UpperCamelCase : Dict = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
__UpperCamelCase : Any = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
def a_ (self ) -> int:
__UpperCamelCase : Optional[int] = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
assert tok("sam" ).input_ids == [1_3_8_4]
__UpperCamelCase : Dict = "I am a small frog."
__UpperCamelCase : Any = tok([src_text] , padding=_UpperCAmelCase , truncation=_UpperCAmelCase )["input_ids"]
__UpperCamelCase : Optional[Any] = tok.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def a_ (self ) -> List[Any]:
__UpperCamelCase : Dict = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
__UpperCamelCase : Tuple = "I am a small frog ."
__UpperCamelCase : List[str] = "."
__UpperCamelCase : Any = tok(_UpperCAmelCase )["input_ids"]
__UpperCamelCase : Optional[Any] = tok(_UpperCAmelCase )["input_ids"]
assert encoded[-1] == encoded_dot[0]
| 298
| 0
|
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def lowerCamelCase__ ( snake_case_ : Optional[int] , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] ) -> Union[str, Any]:
__snake_case = OmegaConf.load(snake_case_ )
__snake_case = torch.load(snake_case_ , map_location='''cpu''' )['''model''']
__snake_case = list(state_dict.keys() )
# extract state_dict for VQVAE
__snake_case = {}
__snake_case = '''first_stage_model.'''
for key in keys:
if key.startswith(snake_case_ ):
__snake_case = state_dict[key]
# extract state_dict for UNetLDM
__snake_case = {}
__snake_case = '''model.diffusion_model.'''
for key in keys:
if key.startswith(snake_case_ ):
__snake_case = state_dict[key]
__snake_case = config.model.params.first_stage_config.params
__snake_case = config.model.params.unet_config.params
__snake_case = VQModel(**snake_case_ ).eval()
vqvae.load_state_dict(snake_case_ )
__snake_case = UNetLDMModel(**snake_case_ ).eval()
unet.load_state_dict(snake_case_ )
__snake_case = DDIMScheduler(
num_train_timesteps=config.model.params.timesteps , beta_schedule='''scaled_linear''' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=snake_case_ , )
__snake_case = LDMPipeline(snake_case_ , snake_case_ , snake_case_ )
pipeline.save_pretrained(snake_case_ )
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', type=str, required=True)
parser.add_argument('--config_path', type=str, required=True)
parser.add_argument('--output_path', type=str, required=True)
snake_case_ = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
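# Example invocation sketch (file paths are placeholders):
#
#   python convert_ldm.py \
#       --checkpoint_path last.ckpt \
#       --config_path config.yaml \
#       --output_path ./ldm_pipeline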
| 24
|
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowerCAmelCase = logging.get_logger(__name__)
# General docstring
_lowerCAmelCase = '''RegNetConfig'''
# Base docstring
_lowerCAmelCase = '''facebook/regnet-y-040'''
_lowerCAmelCase = [1, 1088, 7, 7]
# Image classification docstring
_lowerCAmelCase = '''facebook/regnet-y-040'''
_lowerCAmelCase = '''tabby, tabby cat'''
_lowerCAmelCase = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase = 3 , _UpperCAmelCase = 1 , _UpperCAmelCase = 1 , _UpperCAmelCase = "relu" , **_UpperCAmelCase , ) -> Optional[int]:
super().__init__(**_UpperCAmelCase )
# The padding and conv have been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
__UpperCamelCase : List[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
__UpperCamelCase : Tuple = tf.keras.layers.ConvaD(
filters=_UpperCAmelCase , kernel_size=_UpperCAmelCase , strides=_UpperCAmelCase , padding="VALID" , groups=_UpperCAmelCase , use_bias=_UpperCAmelCase , name="convolution" , )
__UpperCamelCase : int = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
__UpperCamelCase : List[str] = ACTaFN[activation] if activation is not None else tf.identity
def a_ (self , _UpperCAmelCase ) -> Dict:
__UpperCamelCase : str = self.convolution(self.padding(_UpperCAmelCase ) )
__UpperCamelCase : Dict = self.normalization(_UpperCAmelCase )
__UpperCamelCase : Dict = self.activation(_UpperCAmelCase )
return hidden_state
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , **_UpperCAmelCase ) -> Optional[Any]:
super().__init__(**_UpperCAmelCase )
__UpperCamelCase : Any = config.num_channels
__UpperCamelCase : str = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def a_ (self , _UpperCAmelCase ) -> Tuple:
__UpperCamelCase : Dict = shape_list(_UpperCAmelCase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
__UpperCamelCase : Any = tf.transpose(_UpperCAmelCase , perm=(0, 2, 3, 1) )
__UpperCamelCase : List[Any] = self.embedder(_UpperCAmelCase )
return hidden_state
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase = 2 , **_UpperCAmelCase ) -> Any:
super().__init__(**_UpperCAmelCase )
__UpperCamelCase : Any = tf.keras.layers.ConvaD(
filters=_UpperCAmelCase , kernel_size=1 , strides=_UpperCAmelCase , use_bias=_UpperCAmelCase , name="convolution" )
__UpperCamelCase : Tuple = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase = False ) -> tf.Tensor:
return self.normalization(self.convolution(_UpperCAmelCase ) , training=_UpperCAmelCase )
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) -> Any:
super().__init__(**_UpperCAmelCase )
__UpperCamelCase : List[str] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_UpperCAmelCase , name="pooler" )
__UpperCamelCase : Optional[Any] = [
tf.keras.layers.ConvaD(filters=_UpperCAmelCase , kernel_size=1 , activation="relu" , name="attention.0" ),
tf.keras.layers.ConvaD(filters=_UpperCAmelCase , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
]
def a_ (self , _UpperCAmelCase ) -> Tuple:
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
__UpperCamelCase : List[str] = self.pooler(_UpperCAmelCase )
for layer_module in self.attention:
__UpperCamelCase : str = layer_module(_UpperCAmelCase )
__UpperCamelCase : List[Any] = hidden_state * pooled
return hidden_state
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1 , **_UpperCAmelCase ) -> int:
super().__init__(**_UpperCAmelCase )
__UpperCamelCase : List[Any] = in_channels != out_channels or stride != 1
__UpperCamelCase : List[str] = max(1 , out_channels // config.groups_width )
__UpperCamelCase : List[Any] = (
TFRegNetShortCut(_UpperCAmelCase , stride=_UpperCAmelCase , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
__UpperCamelCase : Optional[Any] = [
TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
_UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , activation=config.hidden_act , name="layer.1" ),
TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase , name="layer.2" ),
]
__UpperCamelCase : Dict = ACTaFN[config.hidden_act]
def a_ (self , _UpperCAmelCase ) -> Union[str, Any]:
__UpperCamelCase : List[Any] = hidden_state
for layer_module in self.layers:
__UpperCamelCase : Dict = layer_module(_UpperCAmelCase )
__UpperCamelCase : List[Any] = self.shortcut(_UpperCAmelCase )
hidden_state += residual
__UpperCamelCase : Tuple = self.activation(_UpperCAmelCase )
return hidden_state
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1 , **_UpperCAmelCase ) -> Any:
super().__init__(**_UpperCAmelCase )
__UpperCamelCase : str = in_channels != out_channels or stride != 1
__UpperCamelCase : Optional[int] = max(1 , out_channels // config.groups_width )
__UpperCamelCase : Union[str, Any] = (
TFRegNetShortCut(_UpperCAmelCase , stride=_UpperCAmelCase , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
__UpperCamelCase : Union[str, Any] = [
TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
_UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(_UpperCAmelCase , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase , name="layer.3" ),
]
__UpperCamelCase : Union[str, Any] = ACTaFN[config.hidden_act]
def a_ (self , _UpperCAmelCase ) -> int:
__UpperCamelCase : str = hidden_state
for layer_module in self.layers:
__UpperCamelCase : Any = layer_module(_UpperCAmelCase )
__UpperCamelCase : Optional[Any] = self.shortcut(_UpperCAmelCase )
hidden_state += residual
__UpperCamelCase : Union[str, Any] = self.activation(_UpperCAmelCase )
return hidden_state
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , **_UpperCAmelCase ) -> int:
super().__init__(**_UpperCAmelCase )
__UpperCamelCase : List[str] = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
__UpperCamelCase : Tuple = [
# downsampling is done in the first layer with stride of 2
layer(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , name="layers.0" ),
*[layer(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , name=f"layers.{i+1}" ) for i in range(depth - 1 )],
]
def a_ (self , _UpperCAmelCase ) -> Any:
for layer_module in self.layers:
__UpperCamelCase : Dict = layer_module(_UpperCAmelCase )
return hidden_state
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , **_UpperCAmelCase ) -> str:
super().__init__(**_UpperCAmelCase )
__UpperCamelCase : Dict = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
_UpperCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
__UpperCamelCase : Union[str, Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(_UpperCAmelCase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , depth=_UpperCAmelCase , name=f"stages.{i+1}" ) )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase = False , _UpperCAmelCase = True ) -> TFBaseModelOutputWithNoAttention:
__UpperCamelCase : List[Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__UpperCamelCase : Any = hidden_states + (hidden_state,)
__UpperCamelCase : Any = stage_module(_UpperCAmelCase )
if output_hidden_states:
__UpperCamelCase : List[Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=_UpperCAmelCase , hidden_states=_UpperCAmelCase )
@keras_serializable
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
A = RegNetConfig
def __init__(self , _UpperCAmelCase , **_UpperCAmelCase ) -> List[Any]:
super().__init__(**_UpperCAmelCase )
__UpperCamelCase : Optional[int] = config
__UpperCamelCase : List[Any] = TFRegNetEmbeddings(_UpperCAmelCase , name="embedder" )
__UpperCamelCase : Union[str, Any] = TFRegNetEncoder(_UpperCAmelCase , name="encoder" )
__UpperCamelCase : Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_UpperCAmelCase , name="pooler" )
@unpack_inputs
def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
__UpperCamelCase : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCamelCase : Dict = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCamelCase : Union[str, Any] = self.embedder(_UpperCAmelCase , training=_UpperCAmelCase )
__UpperCamelCase : str = self.encoder(
_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase , training=_UpperCAmelCase )
__UpperCamelCase : List[str] = encoder_outputs[0]
__UpperCamelCase : Tuple = self.pooler(_UpperCAmelCase )
# Change to NCHW output format to have uniformity across the modules
__UpperCamelCase : List[str] = tf.transpose(_UpperCAmelCase , perm=(0, 3, 1, 2) )
__UpperCamelCase : List[Any] = tf.transpose(_UpperCAmelCase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
__UpperCamelCase : List[str] = tuple([tf.transpose(_UpperCAmelCase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_UpperCAmelCase , pooler_output=_UpperCAmelCase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A = RegNetConfig
A = "regnet"
A = "pixel_values"
@property
def a_ (self ) -> List[Any]:
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )}
_lowerCAmelCase = R'''
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''
_lowerCAmelCase = R'''
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) -> Tuple:
super().__init__(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase )
__UpperCamelCase : Optional[Any] = TFRegNetMainLayer(_UpperCAmelCase , name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
__UpperCamelCase : List[str] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCamelCase : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCamelCase : Tuple = self.regnet(
pixel_values=_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase , training=_UpperCAmelCase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) -> int:
super().__init__(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase )
__UpperCamelCase : Optional[Any] = config.num_labels
__UpperCamelCase : Any = TFRegNetMainLayer(_UpperCAmelCase , name="regnet" )
# classification head
__UpperCamelCase : List[str] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def a_ (self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
__UpperCamelCase : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCamelCase : str = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCamelCase : Dict = self.regnet(
_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase , training=_UpperCAmelCase )
__UpperCamelCase : Union[str, Any] = outputs.pooler_output if return_dict else outputs[1]
__UpperCamelCase : List[str] = self.classifier[0](_UpperCAmelCase )
__UpperCamelCase : Optional[int] = self.classifier[1](_UpperCAmelCase )
__UpperCamelCase : str = None if labels is None else self.hf_compute_loss(labels=_UpperCAmelCase , logits=_UpperCAmelCase )
if not return_dict:
__UpperCamelCase : Union[str, Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=_UpperCAmelCase , logits=_UpperCAmelCase , hidden_states=outputs.hidden_states )
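# Minimal inference sketch for the architecture above (illustrative; assumes the public
# `TFRegNetForImageClassification` entry point and downloads pretrained weights):
import tensorflow as tf
from transformers import TFRegNetForImageClassification

model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
pixel_values = tf.random.uniform((1, 3, 224, 224))  # NCHW, matching the serving signature above
outputs = model(pixel_values)
print(outputs.logits.shape)  # (1, 1000) ImageNet classes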
| 298
| 0
|
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class lowerCAmelCase_ :
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=33 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=37 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=5_12 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=None , ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = parent
SCREAMING_SNAKE_CASE__ : str = batch_size
SCREAMING_SNAKE_CASE__ : Optional[int] = seq_length
SCREAMING_SNAKE_CASE__ : List[str] = is_training
SCREAMING_SNAKE_CASE__ : Dict = use_input_mask
SCREAMING_SNAKE_CASE__ : int = use_token_type_ids
SCREAMING_SNAKE_CASE__ : Dict = use_labels
SCREAMING_SNAKE_CASE__ : Tuple = vocab_size
SCREAMING_SNAKE_CASE__ : Dict = hidden_size
SCREAMING_SNAKE_CASE__ : Any = num_hidden_layers
SCREAMING_SNAKE_CASE__ : List[str] = num_attention_heads
SCREAMING_SNAKE_CASE__ : str = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Dict = type_vocab_size
SCREAMING_SNAKE_CASE__ : Any = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_labels
SCREAMING_SNAKE_CASE__ : List[str] = num_choices
SCREAMING_SNAKE_CASE__ : Tuple = scope
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[int] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ : List[str] = None
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
SCREAMING_SNAKE_CASE__ : List[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = EsmModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = model(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = EsmForMaskedLM(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE__ : List[Any] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.num_labels
SCREAMING_SNAKE_CASE__ : Tuple = EsmForTokenClassification(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.prepare_config_and_inputs()
( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) = config_and_inputs
SCREAMING_SNAKE_CASE__ : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ (a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = False
__UpperCamelCase : int = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
__UpperCamelCase : Optional[int] = ()
__UpperCamelCase : List[Any] = (
{
'''feature-extraction''': EsmModel,
'''fill-mask''': EsmForMaskedLM,
'''text-classification''': EsmForSequenceClassification,
'''token-classification''': EsmForTokenClassification,
'''zero-shot''': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase : Any = True
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = EsmModelTester(self )
SCREAMING_SNAKE_CASE__ : int = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def __magic_name__ (self ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = type
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE__ )
@slow
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Tuple = EsmModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()[0]
SCREAMING_SNAKE_CASE__ : Dict = EsmEmbeddings(config=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
SCREAMING_SNAKE_CASE__ : List[Any] = create_position_ids_from_input_ids(SCREAMING_SNAKE_CASE__ , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) )
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs()[0]
SCREAMING_SNAKE_CASE__ : Any = EsmEmbeddings(config=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.empty(2 , 4 , 30 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.as_tensor([expected_single_positions, expected_single_positions] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = embeddings.create_position_ids_from_inputs_embeds(SCREAMING_SNAKE_CASE__ )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) )
@unittest.skip("""Esm does not support embedding resizing""" )
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
pass
@require_torch
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
@slow
def __magic_name__ (self ) -> Any:
"""simple docstring"""
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[int] = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE__ : Optional[int] = model(SCREAMING_SNAKE_CASE__ )[0]
SCREAMING_SNAKE_CASE__ : Tuple = 33
SCREAMING_SNAKE_CASE__ : List[str] = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor(
[[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
@slow
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : int = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
SCREAMING_SNAKE_CASE__ : Tuple = model(SCREAMING_SNAKE_CASE__ )[0]
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(
[[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
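# Reference sketch of the position-id rule exercised by the tests above (illustrative):
# non-pad tokens are numbered padding_idx + 1, padding_idx + 2, ... left to right, while
# padding positions keep padding_idx itself.
import torch


def ref_position_ids(input_ids, padding_idx):
    mask = (input_ids != padding_idx).long()
    return torch.cumsum(mask, dim=1) * mask + padding_idx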
| 25
|
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def __lowerCAmelCase ( snake_case__ ):
__UpperCamelCase : Tuple = torch.exp(snake_case__ )
__UpperCamelCase : str = torch.sum(snake_case__ , dim=1 ) # sum of exp(x_i)
__UpperCamelCase : int = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i)
return torch.log(snake_case__ ) - B / A
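# Sanity-check note (illustrative): the expression above equals the Shannon entropy of
# softmax(x) along dim 1, since H(p) = log(sum_i exp(x_i)) - sum_i p_i * x_i for
# p = softmax(x). For uniform logits over two classes, e.g.
#   x = torch.tensor([[1.0, 1.0]]); p = torch.softmax(x, dim=1)
#   (-(p * p.log()).sum(dim=1))  # tensor([0.6931]) == ln(2)
# both formulations agree.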
class A ( nn.Module ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase ) -> Union[str, Any]:
super().__init__()
__UpperCamelCase : Any = config.output_attentions
__UpperCamelCase : Dict = config.output_hidden_states
__UpperCamelCase : Union[str, Any] = nn.ModuleList([BertLayer(_UpperCAmelCase ) for _ in range(config.num_hidden_layers )] )
__UpperCamelCase : Tuple = nn.ModuleList([BertHighway(_UpperCAmelCase ) for _ in range(config.num_hidden_layers )] )
__UpperCamelCase : Optional[int] = [-1 for _ in range(config.num_hidden_layers )]
def a_ (self , _UpperCAmelCase ) -> int:
if (type(_UpperCAmelCase ) is float) or (type(_UpperCAmelCase ) is int):
for i in range(len(self.early_exit_entropy ) ):
__UpperCamelCase : str = x
else:
__UpperCamelCase : List[Any] = x
def a_ (self , _UpperCAmelCase ) -> str:
__UpperCamelCase : Tuple = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ) -> List[Any]:
__UpperCamelCase : Optional[Any] = ()
__UpperCamelCase : Tuple = ()
__UpperCamelCase : Dict = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
__UpperCamelCase : Tuple = all_hidden_states + (hidden_states,)
__UpperCamelCase : Optional[int] = layer_module(
_UpperCAmelCase , _UpperCAmelCase , head_mask[i] , _UpperCAmelCase , _UpperCAmelCase )
__UpperCamelCase : Tuple = layer_outputs[0]
if self.output_attentions:
__UpperCamelCase : Optional[Any] = all_attentions + (layer_outputs[1],)
__UpperCamelCase : Any = (hidden_states,)
if self.output_hidden_states:
__UpperCamelCase : Any = current_outputs + (all_hidden_states,)
if self.output_attentions:
__UpperCamelCase : int = current_outputs + (all_attentions,)
__UpperCamelCase : Optional[int] = self.highway[i](_UpperCAmelCase )
# logits, pooled_output
if not self.training:
__UpperCamelCase : Dict = highway_exit[0]
__UpperCamelCase : Any = entropy(_UpperCAmelCase )
__UpperCamelCase : str = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
__UpperCamelCase : Optional[Any] = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
__UpperCamelCase : str = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(_UpperCAmelCase , i + 1 )
else:
__UpperCamelCase : Optional[int] = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
__UpperCamelCase : int = all_hidden_states + (hidden_states,)
__UpperCamelCase : Dict = (hidden_states,)
if self.output_hidden_states:
__UpperCamelCase : Union[str, Any] = outputs + (all_hidden_states,)
if self.output_attentions:
__UpperCamelCase : Optional[int] = outputs + (all_attentions,)
__UpperCamelCase : List[Any] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"The Bert Model transformer with early exiting (DeeBERT). " , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase ) -> Dict:
super().__init__(_UpperCAmelCase )
__UpperCamelCase : Union[str, Any] = config
__UpperCamelCase : Dict = BertEmbeddings(_UpperCAmelCase )
__UpperCamelCase : Optional[Any] = DeeBertEncoder(_UpperCAmelCase )
__UpperCamelCase : str = BertPooler(_UpperCAmelCase )
self.init_weights()
def a_ (self ) -> Any:
self.encoder.init_highway_pooler(self.pooler )
def a_ (self ) -> Optional[int]:
return self.embeddings.word_embeddings
def a_ (self , _UpperCAmelCase ) -> Dict:
__UpperCamelCase : int = value
def a_ (self , _UpperCAmelCase ) -> Tuple:
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(_UpperCAmelCase )
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
def a_ (self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ) -> Union[str, Any]:
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
elif input_ids is not None:
__UpperCamelCase : Tuple = input_ids.size()
elif inputs_embeds is not None:
__UpperCamelCase : Optional[int] = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds" )
__UpperCamelCase : List[str] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__UpperCamelCase : int = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
if encoder_attention_mask is None:
__UpperCamelCase : Tuple = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
if token_type_ids is None:
__UpperCamelCase : Optional[Any] = torch.zeros(_UpperCAmelCase , dtype=torch.long , device=_UpperCAmelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__UpperCamelCase : torch.Tensor = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
__UpperCamelCase : Tuple = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
__UpperCamelCase : Any = encoder_attention_mask[:, None, None, :]
__UpperCamelCase : List[Any] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
__UpperCamelCase : Dict = (1.0 - encoder_extended_attention_mask) * -10_000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__UpperCamelCase : Dict = self.get_head_mask(_UpperCAmelCase , self.config.num_hidden_layers )
__UpperCamelCase : Optional[int] = self.embeddings(
input_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase )
__UpperCamelCase : List[Any] = self.encoder(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
__UpperCamelCase : Union[str, Any] = encoder_outputs[0]
__UpperCamelCase : Any = self.pooler(_UpperCAmelCase )
__UpperCamelCase : Union[str, Any] = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]:
__UpperCamelCase : Tuple = message
__UpperCamelCase : Union[str, Any] = exit_layer # start from 1!
class A ( nn.Module ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase ) -> Dict:
super().__init__()
__UpperCamelCase : Union[str, Any] = BertPooler(_UpperCAmelCase )
__UpperCamelCase : int = nn.Dropout(config.hidden_dropout_prob )
__UpperCamelCase : Union[str, Any] = nn.Linear(config.hidden_size , config.num_labels )
def a_ (self , _UpperCAmelCase ) -> Any:
# Pooler
__UpperCamelCase : Optional[int] = encoder_outputs[0]
__UpperCamelCase : str = self.pooler(_UpperCAmelCase )
# "return" pooler_output
# BertModel
__UpperCamelCase : Tuple = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
__UpperCamelCase : Dict = bmodel_output[1]
__UpperCamelCase : List[Any] = self.dropout(_UpperCAmelCase )
__UpperCamelCase : Any = self.classifier(_UpperCAmelCase )
return logits, pooled_output
@add_start_docstrings(
"Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. " , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase ) -> Any:
super().__init__(_UpperCAmelCase )
__UpperCamelCase : List[Any] = config.num_labels
__UpperCamelCase : List[Any] = config.num_hidden_layers
__UpperCamelCase : Optional[int] = DeeBertModel(_UpperCAmelCase )
__UpperCamelCase : List[str] = nn.Dropout(config.hidden_dropout_prob )
__UpperCamelCase : str = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
def a_ (self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=-1 , _UpperCAmelCase=False , ) -> int:
__UpperCamelCase : int = self.num_layers
try:
__UpperCamelCase : Tuple = self.bert(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , head_mask=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
__UpperCamelCase : str = outputs[1]
__UpperCamelCase : List[Any] = self.dropout(_UpperCAmelCase )
__UpperCamelCase : Dict = self.classifier(_UpperCAmelCase )
__UpperCamelCase : Tuple = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__UpperCamelCase : int = e.message
__UpperCamelCase : Optional[Any] = e.exit_layer
__UpperCamelCase : Optional[int] = outputs[0]
if not self.training:
__UpperCamelCase : Optional[int] = entropy(_UpperCAmelCase )
__UpperCamelCase : Optional[Any] = []
__UpperCamelCase : Any = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__UpperCamelCase : List[str] = MSELoss()
__UpperCamelCase : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
__UpperCamelCase : Dict = CrossEntropyLoss()
__UpperCamelCase : Any = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
__UpperCamelCase : List[Any] = []
for highway_exit in outputs[-1]:
__UpperCamelCase : Union[str, Any] = highway_exit[0]
if not self.training:
highway_logits_all.append(_UpperCAmelCase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
__UpperCamelCase : Union[str, Any] = MSELoss()
__UpperCamelCase : str = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
__UpperCamelCase : Optional[Any] = CrossEntropyLoss()
__UpperCamelCase : List[str] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_UpperCAmelCase )
if train_highway:
__UpperCamelCase : int = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
__UpperCamelCase : Dict = (loss,) + outputs
if not self.training:
__UpperCamelCase : Optional[int] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__UpperCamelCase : int = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 298
| 0
|
_snake_case = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
_snake_case = [{"type": "code", "content": INSTALL_CONTENT}]
_snake_case = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 26
|
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = '''config.json'''
WEIGHTS_NAME = '''diffusion_pytorch_model.bin'''
FLAX_WEIGHTS_NAME = '''diffusion_flax_model.msgpack'''
ONNX_WEIGHTS_NAME = '''model.onnx'''
SAFETENSORS_WEIGHTS_NAME = '''diffusion_pytorch_model.safetensors'''
ONNX_EXTERNAL_WEIGHTS_NAME = '''weights.pb'''
HUGGINGFACE_CO_RESOLVE_ENDPOINT = '''https://huggingface.co'''
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = '''diffusers_modules'''
HF_MODULES_CACHE = os.getenv('''HF_MODULES_CACHE''', os.path.join(hf_cache_home, '''modules'''))
DEPRECATED_REVISION_ARGS = ['''fp16''', '''non-ema''']
TEXT_ENCODER_ATTN_MODULE = '''.self_attn'''
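
# Hedged usage sketch (illustrative only; the per-model subdirectory layout
# shown is an assumption, not the hub cache's actual scheme):
#
#     weights_path = os.path.join(DIFFUSERS_CACHE, "my-model", WEIGHTS_NAME)
#     config_path = os.path.join(DIFFUSERS_CACHE, "my-model", CONFIG_NAME)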
| 298
| 0
|
'''simple docstring'''
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integer ids (fairseq-style)."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Load the dictionary from a text file and return it."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Add a word to the dictionary, or bump its count if it is already present."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        # no metadata header in BioGPT dictionaries, so parsing starts at line 0
        return 0

    def add_from_file(self, f):
        """Load a pre-existing dictionary from a text file and add its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, 'r', encoding='utf-8') as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)
        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(' ', 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(' ', 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        'Duplicate word found when loading Dictionary: \'{}\'. '
                        'Duplicate words can overwrite earlier ones by adding the '
                        '#fairseq:overwrite flag at the end of the corresponding row '
                        'in the dictionary file. If using the Camembert model, please '
                        'download an updated copy of the model file.'.format(word) )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' )
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r'@@$' , '' , k), v) if k.endswith('@@' ) else (re.sub(r'$' , '</w>' , k), v) for k, v in d.items() )
    keep_keys = '<s> <pad> </s> <unk>'.split()
    # restore the special tokens
    for k in keep_keys:
        del d2[F"""{k}</w>"""]
        d2[k] = d[k]  # restore
    return d2
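
# Hedged worked example for `rewrite_dict_keys` (toy counts, illustration only;
# all four special tokens must be present or the `del` above raises KeyError):
#
#     >>> rewrite_dict_keys({"le@@": 5, "tt@@": 6, "er": 7, "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3})
#     {'le': 5, 'tt': 6, 'er</w>': 7, '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}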
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(F"""path {biogpt_checkpoint_path} does not exist!""")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(F"""Writing results to {pytorch_dump_folder_path}""")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, 'checkpoint.pt')
    if not os.path.isfile(checkpoint_file):
        raise ValueError(F"""path to the file {checkpoint_file} does not exist!""")
    chkpt = torch.load(checkpoint_file, map_location='cpu')

    args = chkpt['cfg']['model']

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, 'dict.txt')
    if not os.path.isfile(dict_file):
        raise ValueError(F"""path to the file {dict_file} does not exist!""")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['vocab_file'])
    print(F"""Generating {src_vocab_file} of {src_vocab_size} records""")
    with open(src_vocab_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, 'bpecodes')
    if not os.path.isfile(bpecodes_file):
        raise ValueError(F"""path to the file {bpecodes_file} does not exist!""")
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['merges_file'])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, 'config.json')
    model_conf = {
        'activation_dropout': args['activation_dropout'],
        'architectures': ['BioGptForCausalLM'],
        'attention_probs_dropout_prob': args['attention_dropout'],
        'bos_token_id': 0,
        'eos_token_id': 2,
        'hidden_act': args['activation_fn'],
        'hidden_dropout_prob': args['dropout'],
        'hidden_size': args['decoder_embed_dim'],
        'initializer_range': 0.02,
        'intermediate_size': args['decoder_ffn_embed_dim'],
        'layer_norm_eps': 1e-12,
        'layerdrop': args['decoder_layerdrop'],
        'max_position_embeddings': args['max_target_positions'],
        'model_type': 'biogpt',
        'num_attention_heads': args['decoder_attention_heads'],
        'num_hidden_layers': args['decoder_layers'],
        'pad_token_id': 1,
        'scale_embedding': not args['no_scale_embedding'],
        'tie_word_embeddings': args['share_decoder_input_output_embed'],
        'vocab_size': src_vocab_size,
    }
    # good hparam defaults to start with
    print(F"""Generating {biogpt_model_config_file}""")
    with open(biogpt_model_config_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        'bos_token': '<s>',
        'eos_token': '</s>',
        'model_max_length': 1_024,
        'pad_token': '<pad>',
        'special_tokens_map_file': None,
        'tokenizer_class': 'BioGptTokenizer',
        'unk_token': '<unk>',
    }
    print(F"""Generating {biogpt_tokenizer_config_file}""")
    with open(biogpt_tokenizer_config_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt['model']

    # remove unneeded keys
    ignore_keys = [
        'decoder.version',
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith('output_projection.weight'):
            model_state_dict['output_projection.weight'] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace('decoder', 'biogpt')] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(F"""Generating {pytorch_weights_dump_path}""")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print('Conversion is done!')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
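
# Example invocation (paths are placeholders; the script filename is assumed
# from the upstream repository):
#
#     python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
#         --biogpt_checkpoint_path /path/to/biogpt_dump \
#         --pytorch_dump_folder_path /path/to/output
#
# The dump directory must contain checkpoint.pt, dict.txt and bpecodes, as
# validated in convert_biogpt_checkpoint_to_pytorch above.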
| 27
|
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        # NOTE: the constructor arguments are intentionally ignored below; this
        # tester pins hardcoded values (e.g. hidden_size=384) for ConvBERT.
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True)

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFConvBertModel,
"fill-mask": TFConvBertForMaskedLM,
"question-answering": TFConvBertForQuestionAnswering,
"text-classification": TFConvBertForSequenceClassification,
"token-classification": TFConvBertForTokenClassification,
"zero-shot": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
def a_ (self ) -> Any:
__UpperCamelCase , __UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase : str = True
__UpperCamelCase : int = True
if hasattr(_UpperCAmelCase , "use_cache" ):
__UpperCamelCase : List[Any] = True
__UpperCamelCase : List[str] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
__UpperCamelCase : Optional[Any] = getattr(self.model_tester , "key_length" , _UpperCAmelCase )
for model_class in self.all_model_classes:
__UpperCamelCase : Any = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
__UpperCamelCase : int = model_class(_UpperCAmelCase )
__UpperCamelCase : Any = len(model(_UpperCAmelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase )
__UpperCamelCase : List[str] = os.path.join(_UpperCAmelCase , "saved_model" , "1" )
__UpperCamelCase : List[str] = tf.keras.models.load_model(_UpperCAmelCase )
__UpperCamelCase : Dict = model(_UpperCAmelCase )
if self.is_encoder_decoder:
__UpperCamelCase : Any = outputs["encoder_hidden_states"]
__UpperCamelCase : Tuple = outputs["encoder_attentions"]
else:
__UpperCamelCase : Tuple = outputs["hidden_states"]
__UpperCamelCase : Optional[int] = outputs["attentions"]
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
__UpperCamelCase : Any = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
def a_ (self ) -> Tuple:
__UpperCamelCase , __UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase : str = True
__UpperCamelCase : Tuple = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
__UpperCamelCase : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
__UpperCamelCase : Any = getattr(self.model_tester , "key_length" , _UpperCAmelCase )
__UpperCamelCase : List[Any] = getattr(self.model_tester , "key_length" , _UpperCAmelCase )
def check_decoder_attentions_output(_UpperCAmelCase ):
__UpperCamelCase : Dict = len(_UpperCAmelCase )
self.assertEqual(out_len % 2 , 0 )
__UpperCamelCase : List[str] = outputs.decoder_attentions
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_UpperCAmelCase ):
__UpperCamelCase : Any = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__UpperCamelCase : Any = True
__UpperCamelCase : Dict = False
__UpperCamelCase : str = model_class(_UpperCAmelCase )
__UpperCamelCase : Tuple = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__UpperCamelCase : List[Any] = len(_UpperCAmelCase )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
if self.is_encoder_decoder:
__UpperCamelCase : str = model_class(_UpperCAmelCase )
__UpperCamelCase : Dict = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_decoder_attentions_output(_UpperCAmelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__UpperCamelCase : Optional[Any] = True
__UpperCamelCase : Tuple = model_class(_UpperCAmelCase )
__UpperCamelCase : int = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
# Check attention is always last and order is fine
__UpperCamelCase : int = True
__UpperCamelCase : str = True
__UpperCamelCase : Optional[Any] = model_class(_UpperCAmelCase )
__UpperCamelCase : Optional[int] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase ) )
self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03_475_493, -0.4_686_034, -0.30_638_832],
                    [0.22_637_248, -0.26_988_646, -0.7_423_424],
                    [0.10_324_868, -0.45_013_508, -0.58_280_784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1E-4)
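
# To exercise the @slow integration test above (hedged: the test-file path is
# an assumption based on the usual repository layout, and the checkpoint
# download needs network access):
#
#     RUN_SLOW=1 pytest tests/models/convbert -k "integration"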
| 298
| 0
|
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = """tokenizer_file"""
    special_tokens_map = {"""bos_token""": """<s>""", """eos_token""": """</s>""", """unk_token""": """<unk>""", """pad_token""": """<pad>"""}
    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained('bigscience/tokenizer')
        tokenizer.save_pretrained(self.tmpdirname)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def test_encodings_from_sample_data(self):
        """
        Assert that the created tokens are the same as the hard-coded ones
        """
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
        TARGET_TOKENS = [[2_1_7_5, 2_3_7_1_4, 7_3_1_7_3, 1_4_4_2_5_2, 2], [7_7, 1_3_2_6_1_9, 3_4_7_8, 3_6_8, 1_0_9_5_8_6, 3_5_4_3_3, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)['input_ids']
        self.assertListEqual(TARGET_TOKENS, computed_tokens)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, INPUT_SENTENCES)
    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = 'This is a simple input'
                s2 = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                p2 = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)

                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail('Bloom Tokenizer should be able to deal with padding')

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding='max_length')

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding='max_length')

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding='max_length', )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding='max_length')

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding='max_length')

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding='max_length', )
    def test_encodings_from_xnli_dataset(self):
        """
        Tests the tokenizer downstream with a simple XNLI dataset
        """
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset('xnli', 'all_languages', split='test', streaming=True)

        sample_data = next(iter(ds))['premise']  # pick up one data
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)
    def test_pretrained_model_lists(self):
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
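
# Hedged usage sketch mirroring the round-trip relied on by the tests above
# (requires network access to fetch the tokenizer files):
#
#     tok = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
#     ids = tok.encode("The quick brown fox</s>")
#     assert tok.decode(ids) == "The quick brown fox</s>"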
| 28
|
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """Transfer the weights of `self.src` to `self.dest` by tracing the operations both modules perform on `x`."""
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}." )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transferred from={src_m} to={dest_m}")
def convert_weight_and_push(name, config, save_directory, push_to_hub = True):
    print(F"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = F"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add model", use_temp_dir=True, )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add image processor", use_temp_dir=True, )
        print(F"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory, model_name = None, push_to_hub = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1_000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1_024, 2_048], layer_type="bottleneck"),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1_024, 2_048], layer_type="bottleneck"),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1_024, 2_048], layer_type="bottleneck"),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1_024, 2_048], layer_type="bottleneck"),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
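
# Example invocation (illustrative; the script filename is assumed from the
# upstream repository):
#
#     python convert_resnet_to_pytorch.py --model_name resnet50 \
#         --pytorch_dump_folder_path ./converted
#
# Caveat: argparse's `type=bool` converts any non-empty string to True, so
# passing `--push_to_hub False` would still enable pushing; rely on the
# default or edit the script instead.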
| 298
| 0
|
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
def __init__( self ) -> Optional[int]:
UpperCAmelCase_ : Dict = {}
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=1 ) -> List[Any]:
if self.graph.get(_UpperCamelCase ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
UpperCAmelCase_ : Optional[int] = [[w, v]]
if not self.graph.get(_UpperCamelCase ):
UpperCAmelCase_ : int = []
def __UpperCAmelCase ( self ) -> Dict:
return list(self.graph )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase ) -> int:
if self.graph.get(_UpperCamelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase=-2 , _UpperCamelCase=-1 ) -> Union[str, Any]:
if s == d:
return []
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : Optional[int] = []
if s == -2:
UpperCAmelCase_ : Any = list(self.graph )[0]
stack.append(_UpperCamelCase )
visited.append(_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase_ : Union[str, Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_UpperCamelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase_ : Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_UpperCamelCase ) != 0:
UpperCAmelCase_ : Union[str, Any] = stack[len(_UpperCamelCase ) - 1]
else:
UpperCAmelCase_ : int = ss
            # check if we have reached the starting point
if len(_UpperCamelCase ) == 0:
return visited
def __UpperCAmelCase ( self , _UpperCamelCase=-1 ) -> Union[str, Any]:
if c == -1:
UpperCAmelCase_ : Any = floor(random() * 1_0_0_0_0 ) + 1_0
for i in range(_UpperCamelCase ):
            # every vertex gets between 1 and 102 random edges
for _ in range(floor(random() * 1_0_2 ) + 1 ):
UpperCAmelCase_ : int = floor(random() * c ) + 1
if n != i:
self.add_pair(_UpperCamelCase , _UpperCamelCase , 1 )
def __UpperCAmelCase ( self , _UpperCamelCase=-2 ) -> Tuple:
UpperCAmelCase_ : Union[str, Any] = deque()
UpperCAmelCase_ : Dict = []
if s == -2:
UpperCAmelCase_ : Tuple = list(self.graph )[0]
d.append(_UpperCamelCase )
visited.append(_UpperCamelCase )
while d:
UpperCAmelCase_ : Any = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : Dict = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Dict:
return len(self.graph[u] )
def __UpperCAmelCase ( self , _UpperCamelCase=-2 ) -> Optional[int]:
UpperCAmelCase_ : Tuple = []
UpperCAmelCase_ : List[Any] = []
if s == -2:
UpperCAmelCase_ : Optional[Any] = list(self.graph )[0]
stack.append(_UpperCamelCase )
visited.append(_UpperCamelCase )
UpperCAmelCase_ : List[str] = s
UpperCAmelCase_ : Union[str, Any] = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase_ : Tuple = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase_ : Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(_UpperCamelCase ) != 0:
UpperCAmelCase_ : List[Any] = stack[len(_UpperCamelCase ) - 1]
else:
UpperCAmelCase_ : Optional[int] = ss
            # check if we have reached the starting point
if len(_UpperCamelCase ) == 0:
return sorted_nodes
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase_ : List[Any] = []
UpperCAmelCase_ : str = []
UpperCAmelCase_ : Union[str, Any] = list(self.graph )[0]
stack.append(_UpperCamelCase )
visited.append(_UpperCamelCase )
UpperCAmelCase_ : Any = -2
UpperCAmelCase_ : List[Any] = []
UpperCAmelCase_ : List[str] = s
UpperCAmelCase_ : Optional[int] = False
UpperCAmelCase_ : Any = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase_ : Union[str, Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCAmelCase_ : Dict = len(_UpperCamelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase_ : Dict = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCAmelCase_ : Any = True
if len(_UpperCamelCase ) != 0:
UpperCAmelCase_ : List[str] = stack[len(_UpperCamelCase ) - 1]
else:
UpperCAmelCase_ : int = False
indirect_parents.append(_UpperCamelCase )
UpperCAmelCase_ : Tuple = s
UpperCAmelCase_ : List[Any] = ss
            # check if we have reached the starting point
if len(_UpperCamelCase ) == 0:
return list(_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = []
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : Any = list(self.graph )[0]
stack.append(_UpperCamelCase )
visited.append(_UpperCamelCase )
UpperCAmelCase_ : Tuple = -2
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : Tuple = s
UpperCAmelCase_ : Any = False
UpperCAmelCase_ : Dict = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase_ : Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCAmelCase_ : int = len(_UpperCamelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase_ : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCAmelCase_ : List[Any] = True
if len(_UpperCamelCase ) != 0:
UpperCAmelCase_ : int = stack[len(_UpperCamelCase ) - 1]
else:
UpperCAmelCase_ : List[Any] = False
indirect_parents.append(_UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = s
UpperCAmelCase_ : Dict = ss
            # check if we have reached the starting point
if len(_UpperCamelCase ) == 0:
return False
def __UpperCAmelCase ( self , _UpperCamelCase=-2 , _UpperCamelCase=-1 ) -> Tuple:
UpperCAmelCase_ : Optional[int] = time()
self.dfs(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Optional[int] = time()
return end - begin
def __UpperCAmelCase ( self , _UpperCamelCase=-2 ) -> int:
UpperCAmelCase_ : int = time()
self.bfs(_UpperCamelCase )
UpperCAmelCase_ : List[Any] = time()
return end - begin
class Graph:
def __init__( self ) -> str:
UpperCAmelCase_ : Optional[Any] = {}
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=1 ) -> Any:
        # check if u already exists
if self.graph.get(_UpperCamelCase ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
UpperCAmelCase_ : List[str] = [[w, v]]
# add the other way
if self.graph.get(_UpperCamelCase ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
UpperCAmelCase_ : List[str] = [[w, u]]
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase ) -> Union[str, Any]:
if self.graph.get(_UpperCamelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_UpperCamelCase )
# the other way round
if self.graph.get(_UpperCamelCase ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase=-2 , _UpperCamelCase=-1 ) -> List[str]:
if s == d:
return []
UpperCAmelCase_ : Union[str, Any] = []
UpperCAmelCase_ : Union[str, Any] = []
if s == -2:
UpperCAmelCase_ : Tuple = list(self.graph )[0]
stack.append(_UpperCamelCase )
visited.append(_UpperCamelCase )
UpperCAmelCase_ : Dict = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase_ : Tuple = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_UpperCamelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase_ : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_UpperCamelCase ) != 0:
UpperCAmelCase_ : Dict = stack[len(_UpperCamelCase ) - 1]
else:
UpperCAmelCase_ : Dict = ss
            # check if we have reached the starting point
if len(_UpperCamelCase ) == 0:
return visited
def __UpperCAmelCase ( self , _UpperCamelCase=-1 ) -> Any:
if c == -1:
UpperCAmelCase_ : List[str] = floor(random() * 1_0_0_0_0 ) + 1_0
for i in range(_UpperCamelCase ):
            # every vertex gets between 1 and 102 random edges
for _ in range(floor(random() * 1_0_2 ) + 1 ):
UpperCAmelCase_ : List[Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(_UpperCamelCase , _UpperCamelCase , 1 )
def __UpperCAmelCase ( self , _UpperCamelCase=-2 ) -> Optional[Any]:
UpperCAmelCase_ : Optional[Any] = deque()
UpperCAmelCase_ : List[str] = []
if s == -2:
UpperCAmelCase_ : List[str] = list(self.graph )[0]
d.append(_UpperCamelCase )
visited.append(_UpperCamelCase )
while d:
UpperCAmelCase_ : str = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[Any]:
return len(self.graph[u] )
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : int = []
UpperCAmelCase_ : int = []
UpperCAmelCase_ : str = list(self.graph )[0]
stack.append(_UpperCamelCase )
visited.append(_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = -2
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : List[Any] = s
UpperCAmelCase_ : Any = False
UpperCAmelCase_ : int = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase_ : List[str] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCAmelCase_ : Any = len(_UpperCamelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase_ : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCAmelCase_ : Optional[int] = True
if len(_UpperCamelCase ) != 0:
UpperCAmelCase_ : Any = stack[len(_UpperCamelCase ) - 1]
else:
UpperCAmelCase_ : Optional[Any] = False
indirect_parents.append(_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = s
UpperCAmelCase_ : Any = ss
            # check if we have reached the starting point
if len(_UpperCamelCase ) == 0:
return list(_UpperCamelCase )
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : List[Any] = []
UpperCAmelCase_ : int = []
UpperCAmelCase_ : List[str] = list(self.graph )[0]
stack.append(_UpperCamelCase )
visited.append(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = -2
UpperCAmelCase_ : Tuple = []
UpperCAmelCase_ : List[str] = s
UpperCAmelCase_ : int = False
UpperCAmelCase_ : Union[str, Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase_ : str = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCAmelCase_ : Any = len(_UpperCamelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase_ : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCAmelCase_ : int = True
if len(_UpperCamelCase ) != 0:
UpperCAmelCase_ : Dict = stack[len(_UpperCamelCase ) - 1]
else:
UpperCAmelCase_ : Optional[int] = False
indirect_parents.append(_UpperCamelCase )
UpperCAmelCase_ : Dict = s
UpperCAmelCase_ : Optional[Any] = ss
            # check if we have reached the starting point
if len(_UpperCamelCase ) == 0:
return False
def __UpperCAmelCase ( self ) -> List[str]:
return list(self.graph )
def __UpperCAmelCase ( self , _UpperCamelCase=-2 , _UpperCamelCase=-1 ) -> Any:
UpperCAmelCase_ : Optional[int] = time()
self.dfs(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = time()
return end - begin
def __UpperCAmelCase ( self , _UpperCamelCase=-2 ) -> Tuple:
UpperCAmelCase_ : Optional[Any] = time()
self.bfs(_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = time()
return end - begin
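
# Hedged usage sketch for the two classes above (method names follow the
# upstream TheAlgorithms implementation; in this dump the methods themselves
# carry anonymized names, so the calls below are illustrative only):
#
#     g = DirectedGraph()
#     g.add_pair(1, 2)   # weighted edge 1 -> 2, default weight 1
#     g.add_pair(2, 3)
#     g.dfs(1, 3)        # -> [1, 2, 3]
#     g.bfs(1)           # -> [1, 2, 3]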
| 29
|
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(F"can't find {path}")
    return results
def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Optional[int]:
__UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n ".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
__UpperCamelCase : Tuple = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "glue_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Dict:
__UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n ".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
__UpperCamelCase : int = get_results(_UpperCAmelCase )
self.assertLess(result["perplexity"] , 1_0_0 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "clm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Any:
__UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Optional[Any] = f"\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : Optional[Any] = get_results(_UpperCAmelCase )
self.assertLess(result["perplexity"] , 4_2 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "mlm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> int:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
__UpperCamelCase : int = 7 if get_gpu_count() > 1 else 2
__UpperCamelCase : int = self.get_auto_remove_tmp_dir()
__UpperCamelCase : str = f"\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : List[Any] = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertLess(result["train_loss"] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "ner_no_trainer" ) ) )
@unittest.skip(reason="Fix me @muellerzr" )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Any:
__UpperCamelCase : Tuple = self.get_auto_remove_tmp_dir()
__UpperCamelCase : str = f"\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : Optional[int] = get_results(_UpperCAmelCase )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["eval_f1"] , 2_8 )
self.assertGreaterEqual(result["eval_exact"] , 2_8 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "qa_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Dict:
__UpperCamelCase : Tuple = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[str] = f"\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : Tuple = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "swag_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Union[str, Any]:
__UpperCamelCase : str = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Dict = f"\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : Dict = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_rouge1"] , 1_0 )
self.assertGreaterEqual(result["eval_rouge2"] , 2 )
self.assertGreaterEqual(result["eval_rougeL"] , 7 )
self.assertGreaterEqual(result["eval_rougeLsum"] , 7 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "summarization_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Tuple:
__UpperCamelCase : Optional[int] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : List[Any] = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_bleu"] , 3_0 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "translation_no_trainer" ) ) )
@slow
def a_ (self ) -> List[Any]:
__UpperCamelCase : Tuple = logging.StreamHandler(sys.stdout )
logger.addHandler(_UpperCAmelCase )
__UpperCamelCase : Dict = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : Optional[int] = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10 )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Tuple:
__UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Optional[Any] = f"\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n ".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
__UpperCamelCase : str = get_results(_UpperCAmelCase )
        # The untrained base model scores about 25% accuracy, so this checks that training improved on it.
self.assertGreaterEqual(result["eval_accuracy"] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "step_1" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "image_classification_no_trainer" ) ) )
| 298
| 0
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    """simple docstring"""
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None
class CircularLinkedList:
    """simple docstring"""
    def __init__(self) -> None:
        self.head = None
        self.tail = None
    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break
    def __len__(self) -> int:
        return sum(1 for _ in self)
    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))
    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)
    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)
    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError('''list index out of range.''')
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node
    def delete_front(self) -> Any:
        return self.delete_nth(0)
    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)
    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError('''list index out of range.''')
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data
    def is_empty(self) -> bool:
        return len(self) == 0
def test_circular_linked_list() -> None:
    '''simple docstring'''
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""
    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True
    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True
    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))
    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3
    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
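# Sketch of the invariant the class maintains: the tail's `next` pointer always
# refers back to the head, so __iter__ stops once the walk returns to self.head;
# e.g. after insert_tail(1) and insert_tail(2) the links form 1 -> 2 -> 1 -> ...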
| 30
|
'''simple docstring'''
from maths.prime_check import is_prime
def __lowerCAmelCase ( number ):
    if not isinstance(number , int ):
        __UpperCamelCase : str = F"Input value of [number={number}] must be an integer"
        raise TypeError(__UpperCamelCase )
    if is_prime(number ) and is_prime(number + 2 ):
        return number + 2
    else:
        return -1
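# Minimal usage sketch (assumes maths.prime_check.is_prime is importable):
#   __lowerCAmelCase(5)  # -> 7, since (5, 7) is a twin-prime pair
#   __lowerCAmelCase(4)  # -> -1, since 4 is not prime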
if __name__ == "__main__":
import doctest
doctest.testmod()
| 298
| 0
|
'''simple docstring'''
def UpperCamelCase_ ( _UpperCAmelCase : bytes ) -> str:
"""simple docstring"""
return "".join([hex(_UpperCAmelCase )[2:].zfill(2 ).upper() for byte in list(_UpperCAmelCase )] )
def UpperCamelCase_ ( _UpperCAmelCase : str ) -> bytes:
"""simple docstring"""
if (len(_UpperCAmelCase ) % 2) != 0:
raise ValueError(
"Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(_UpperCAmelCase ) <= set("0123456789ABCDEF" ):
raise ValueError(
"Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(_UpperCAmelCase ) , 2 ) )
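# Worked example (a sketch): b"HELLO" encodes to "48454C4C4F"; decoding walks the
# five hex pairs 48 45 4C 4C 4F and rebuilds b"HELLO", so the two functions are
# inverses on valid uppercase-hex input.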
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
|
'''simple docstring'''
def hubble_parameter( hubble_constant , radiation_density , matter_density , dark_energy , redshift , ):
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters ):
        raise ValueError("All input parameters must be positive" )
    if any(p > 1 for p in parameters[1:4] ):
        raise ValueError("Relative densities cannot be greater than one" )
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
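# The expression above is the Friedmann equation written with density parameters
# (a sketch of the underlying formula): E(z)^2 = Omega_r*(1+z)^4 + Omega_m*(1+z)^3
# + Omega_k*(1+z)^2 + Omega_Lambda, where Omega_k = 1 - (Omega_m + Omega_r +
# Omega_Lambda) and H(z) = H_0 * sqrt(E(z)^2).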
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
_lowerCAmelCase = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 298
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( PipelineTesterMixin , unittest.TestCase ):
snake_case__ : str = KandinskyInpaintPipeline
snake_case__ : Dict = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
snake_case__ : Union[str, Any] = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
snake_case__ : str = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
snake_case__ : List[Any] = False
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
return 3_2
@property
def SCREAMING_SNAKE_CASE ( self : str ) -> str:
return 3_2
@property
def SCREAMING_SNAKE_CASE ( self : str ) -> Any:
return self.time_input_dim
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
return 1_0_0
@property
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
a_ : Optional[Any] = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
torch.manual_seed(0 )
a_ : Optional[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
a_ : Optional[int] = MultilingualCLIP(SCREAMING_SNAKE_CASE__ )
a_ : Any = text_encoder.eval()
return text_encoder
@property
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
torch.manual_seed(0 )
a_ : str = {
'in_channels': 9,
            # Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
a_ : Optional[int] = UNetaDConditionModel(**SCREAMING_SNAKE_CASE__ )
return model
@property
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
torch.manual_seed(0 )
a_ : Union[str, Any] = VQModel(**self.dummy_movq_kwargs )
return model
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
a_ : Tuple = self.dummy_text_encoder
a_ : Union[str, Any] = self.dummy_tokenizer
a_ : List[Any] = self.dummy_unet
a_ : Optional[Any] = self.dummy_movq
a_ : Any = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='linear' , beta_start=0.00085 , beta_end=0.012 , clip_sample=SCREAMING_SNAKE_CASE__ , set_alpha_to_one=SCREAMING_SNAKE_CASE__ , steps_offset=1 , prediction_type='epsilon' , thresholding=SCREAMING_SNAKE_CASE__ , )
a_ : List[Any] = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any=0 ) -> Dict:
a_ : Tuple = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
a_ : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(SCREAMING_SNAKE_CASE__ )
# create init_image
a_ : int = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
a_ : Union[str, Any] = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE__ ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
# create mask
a_ : Optional[int] = np.ones((6_4, 6_4) , dtype=np.floataa )
a_ : Dict = 0
if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ):
a_ : Optional[int] = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
a_ : int = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
a_ : Dict = {
'prompt': 'horse',
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
a_ : Optional[Any] = 'cpu'
a_ : List[Any] = self.get_dummy_components()
a_ : List[str] = self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
a_ : Any = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
a_ : Tuple = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) )
a_ : Tuple = output.images
a_ : Optional[int] = pipe(
**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) , return_dict=SCREAMING_SNAKE_CASE__ , )[0]
a_ : List[str] = image[0, -3:, -3:, -1]
a_ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 6_4, 6_4, 3)
a_ : List[str] = np.array(
[0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
a_ : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' )
a_ : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
a_ : Union[str, Any] = np.ones((7_6_8, 7_6_8) , dtype=np.floataa )
a_ : Tuple = 0
a_ : Union[str, Any] = 'a hat'
a_ : List[str] = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa )
pipe_prior.to(SCREAMING_SNAKE_CASE__ )
a_ : int = KandinskyInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-inpaint' , torch_dtype=torch.floataa )
a_ : Union[str, Any] = pipeline.to(SCREAMING_SNAKE_CASE__ )
pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
a_ : Tuple = torch.Generator(device='cpu' ).manual_seed(0 )
a_ , a_ : List[str] = pipe_prior(
SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
a_ : str = pipeline(
SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , image_embeds=SCREAMING_SNAKE_CASE__ , negative_image_embeds=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type='np' , )
a_ : List[str] = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
| 32
|
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = '''src/transformers'''
PATH_TO_TASK_GUIDES = '''docs/source/en/tasks'''
def _find_text_in_file( filename , start_prompt , end_prompt ):
    with open(filename , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1
    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def get_model_list_for_task( task_guide ):
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide , set() )
__UpperCamelCase : Union[str, Any] = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F"[{name}](../model_doc/{code})" for code, name in model_names.items()] ) + "\n"
def check_model_list_for_task( task_guide , overwrite=False ):
    current_list , start_index , end_index , lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , )
    new_list = get_model_list_for_task(task_guide )
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , "w" , encoding="utf-8" , newline="\n" ) as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
" to fix this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 298
| 0
|
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__A : str = logging.get_logger(__name__)
__A : Union[str, Any] = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
        ('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
__A : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def lowercase ( __snake_case : str ):
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
lowercase_ : Dict = model_type_to_module_name(__snake_case )
lowercase_ : Optional[Any] = importlib.import_module(F'''.{module_name}''' , '''transformers.models''' )
try:
return getattr(__snake_case , __snake_case )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(__snake_case , '''__name__''' , __snake_case ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dependency is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
lowercase_ : str = importlib.import_module('''transformers''' )
if hasattr(__snake_case , __snake_case ):
return getattr(__snake_case , __snake_case )
return None
def lowercase ( __snake_case : Union[str, os.PathLike] , __snake_case : Optional[Union[str, os.PathLike]] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : Optional[Dict[str, str]] = None , __snake_case : Optional[Union[bool, str]] = None , __snake_case : Optional[str] = None , __snake_case : bool = False , **__snake_case : List[str] , ):
lowercase_ : Optional[Any] = get_file_from_repo(
__snake_case , __snake_case , cache_dir=__snake_case , force_download=__snake_case , resume_download=__snake_case , proxies=__snake_case , use_auth_token=__snake_case , revision=__snake_case , local_files_only=__snake_case , )
if resolved_config_file is None:
logger.info(
'''Could not locate the image processor configuration file, will try to use the model config instead.''' )
return {}
with open(__snake_case , encoding='''utf-8''' ) as reader:
return json.load(__snake_case )
class _UpperCAmelCase :
def __init__( self : Tuple ) -> Union[str, Any]:
raise EnvironmentError(
'''AutoImageProcessor is designed to be instantiated '''
'''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(A )
def A ( cls : Union[str, Any] , A : List[Any] , **A : Tuple ) -> Dict:
lowercase_ : Tuple = kwargs.pop('''config''' , A )
lowercase_ : Tuple = kwargs.pop('''trust_remote_code''' , A )
lowercase_ : Optional[Any] = True
lowercase_ , lowercase_ : Optional[Any] = ImageProcessingMixin.get_image_processor_dict(A , **A )
lowercase_ : Union[str, Any] = config_dict.get('''image_processor_type''' , A )
lowercase_ : Tuple = None
if "AutoImageProcessor" in config_dict.get('''auto_map''' , {} ):
lowercase_ : Optional[int] = config_dict['''auto_map''']['''AutoImageProcessor''']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
lowercase_ : Union[str, Any] = config_dict.pop('''feature_extractor_type''' , A )
if feature_extractor_class is not None:
logger.warning(
'''Could not find image processor class in the image processor config or the model config. Loading'''
''' based on pattern matching with the model\'s feature extractor configuration.''' )
lowercase_ : List[Any] = feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''' )
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
lowercase_ : str = config_dict['''auto_map''']['''AutoFeatureExtractor''']
lowercase_ : int = feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''' )
logger.warning(
'''Could not find image processor auto map in the image processor config or the model config.'''
''' Loading based on pattern matching with the model\'s feature extractor configuration.''' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(A , A ):
lowercase_ : List[Any] = AutoConfig.from_pretrained(A , **A )
            # It could be in `config.image_processor_type`
lowercase_ : Tuple = getattr(A , '''image_processor_type''' , A )
if hasattr(A , '''auto_map''' ) and "AutoImageProcessor" in config.auto_map:
lowercase_ : Tuple = config.auto_map['''AutoImageProcessor''']
if image_processor_class is not None:
lowercase_ : Tuple = image_processor_class_from_name(A )
lowercase_ : Tuple = image_processor_auto_map is not None
lowercase_ : Any = image_processor_class is not None or type(A ) in IMAGE_PROCESSOR_MAPPING
lowercase_ : Tuple = resolve_trust_remote_code(
A , A , A , A )
if has_remote_code and trust_remote_code:
lowercase_ : Dict = get_class_from_dynamic_module(
A , A , **A )
lowercase_ : List[str] = kwargs.pop('''code_revision''' , A )
if os.path.isdir(A ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(A , **A )
elif image_processor_class is not None:
return image_processor_class.from_dict(A , **A )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(A ) in IMAGE_PROCESSOR_MAPPING:
lowercase_ : int = IMAGE_PROCESSOR_MAPPING[type(A )]
return image_processor_class.from_dict(A , **A )
raise ValueError(
F'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
F'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
F'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def A ( A : int , A : Dict ) -> Dict:
IMAGE_PROCESSOR_MAPPING.register(A , A )
| 33
|
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class A ( ProcessorMixin ):
'''simple docstring'''
A = ["image_processor", "tokenizer"]
A = "OwlViTImageProcessor"
A = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__(self , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase ) -> str:
__UpperCamelCase : Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , _UpperCAmelCase , )
__UpperCamelCase : str = kwargs.pop("feature_extractor" )
__UpperCamelCase : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__(self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="max_length" , _UpperCAmelCase="np" , **_UpperCAmelCase ) -> str:
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) or (isinstance(_UpperCAmelCase , _UpperCAmelCase ) and not isinstance(text[0] , _UpperCAmelCase )):
__UpperCamelCase : Tuple = [self.tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )]
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(text[0] , _UpperCAmelCase ):
__UpperCamelCase : List[str] = []
# Maximum number of queries across batch
__UpperCamelCase : List[str] = max([len(_UpperCAmelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(_UpperCAmelCase ) != max_num_queries:
__UpperCamelCase : Any = t + [" "] * (max_num_queries - len(_UpperCAmelCase ))
__UpperCamelCase : int = self.tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
encodings.append(_UpperCAmelCase )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
__UpperCamelCase : List[str] = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
__UpperCamelCase : int = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__UpperCamelCase : Tuple = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
__UpperCamelCase : Optional[Any] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__UpperCamelCase : Any = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
__UpperCamelCase : List[Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__UpperCamelCase : Any = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
__UpperCamelCase : Optional[Any] = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
__UpperCamelCase : Optional[Any] = BatchEncoding()
__UpperCamelCase : Union[str, Any] = input_ids
__UpperCamelCase : List[str] = attention_mask
if query_images is not None:
__UpperCamelCase : str = BatchEncoding()
__UpperCamelCase : Any = self.image_processor(
_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ).pixel_values
__UpperCamelCase : List[Any] = query_pixel_values
if images is not None:
__UpperCamelCase : Dict = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if text is not None and images is not None:
__UpperCamelCase : Optional[Any] = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__UpperCamelCase : Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]:
return self.image_processor.post_process(*_UpperCAmelCase , **_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> List[str]:
return self.image_processor.post_process_object_detection(*_UpperCAmelCase , **_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]:
return self.image_processor.post_process_image_guided_detection(*_UpperCAmelCase , **_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> int:
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def a_ (self ) -> Tuple:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _UpperCAmelCase , )
return self.image_processor_class
@property
def a_ (self ) -> Union[str, Any]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _UpperCAmelCase , )
return self.image_processor
| 298
| 0
|
'''simple docstring'''
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
A =True
except ImportError:
A =False
A =logging.get_logger(__name__) # pylint: disable=invalid-name
def snake_case_ (_a : Namespace ):
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class AddNewModelCommand ( BaseTransformersCLICommand ):
@staticmethod
def A ( lowercase : ArgumentParser ):
'''simple docstring'''
UpperCAmelCase = parser.add_parser('''add-new-model''' )
add_new_model_parser.add_argument('''--testing''' , action='''store_true''' , help='''If in testing mode.''' )
add_new_model_parser.add_argument('''--testing_file''' , type=lowercase , help='''Configuration file on which to run.''' )
add_new_model_parser.add_argument(
'''--path''' , type=lowercase , help='''Path to cookiecutter. Should only be used for testing purposes.''' )
add_new_model_parser.set_defaults(func=lowercase )
def __init__( self : List[Any] , lowercase : bool , lowercase : str , lowercase : Dict=None , *lowercase : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase = testing
UpperCAmelCase = testing_file
UpperCAmelCase = path
def A ( self : Optional[Any] ):
'''simple docstring'''
warnings.warn(
'''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '''
'''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '''
'''checks, you should use `transformers-cli add-new-model-like` instead.''' )
if not _has_cookiecutter:
raise ImportError(
'''Model creation dependencies are required to use the `add_new_model` command. Install them by running '''
'''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
UpperCAmelCase = [directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:22]]
if len(lowercase ) > 0:
raise ValueError(
'''Several directories starting with `cookiecutter-template-` in current working directory. '''
'''Please clean your directory by removing all folders starting with `cookiecutter-template-` or '''
'''change your working directory.''' )
UpperCAmelCase = (
Path(lowercase ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
UpperCAmelCase = path_to_transformer_root / '''templates''' / '''adding_a_new_model'''
# Execute cookiecutter
if not self._testing:
cookiecutter(str(lowercase ) )
else:
with open(self._testing_file , '''r''' ) as configuration_file:
UpperCAmelCase = json.load(lowercase )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=lowercase , extra_context=lowercase , )
UpperCAmelCase = [directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:22]][0]
# Retrieve configuration
with open(directory + '''/configuration.json''' , '''r''' ) as configuration_file:
UpperCAmelCase = json.load(lowercase )
UpperCAmelCase = configuration['''lowercase_modelname''']
UpperCAmelCase = configuration['''generate_tensorflow_pytorch_and_flax''']
os.remove(f"{directory}/configuration.json" )
UpperCAmelCase = '''PyTorch''' in generate_tensorflow_pytorch_and_flax
UpperCAmelCase = '''TensorFlow''' in generate_tensorflow_pytorch_and_flax
UpperCAmelCase = '''Flax''' in generate_tensorflow_pytorch_and_flax
UpperCAmelCase = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
os.makedirs(lowercase , exist_ok=lowercase )
os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}" , exist_ok=lowercase )
# Tests require submodules as they have parent imports
with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py" , '''w''' ):
pass
shutil.move(
f"{directory}/__init__.py" , f"{model_dir}/__init__.py" , )
shutil.move(
f"{directory}/configuration_{lowercase_model_name}.py" , f"{model_dir}/configuration_{lowercase_model_name}.py" , )
def remove_copy_lines(lowercase : Union[str, Any] ):
with open(lowercase , '''r''' ) as f:
UpperCAmelCase = f.readlines()
with open(lowercase , '''w''' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(lowercase )
if output_pytorch:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py" )
shutil.move(
f"{directory}/modeling_{lowercase_model_name}.py" , f"{model_dir}/modeling_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/test_modeling_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py" , )
else:
os.remove(f"{directory}/modeling_{lowercase_model_name}.py" )
os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py" )
shutil.move(
f"{directory}/modeling_tf_{lowercase_model_name}.py" , f"{model_dir}/modeling_tf_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/test_modeling_tf_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py" , )
else:
os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py" )
os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py" )
if output_flax:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py" )
shutil.move(
f"{directory}/modeling_flax_{lowercase_model_name}.py" , f"{model_dir}/modeling_flax_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/test_modeling_flax_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py" , )
else:
os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py" )
os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py" )
shutil.move(
f"{directory}/{lowercase_model_name}.md" , f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md" , )
shutil.move(
f"{directory}/tokenization_{lowercase_model_name}.py" , f"{model_dir}/tokenization_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/tokenization_fast_{lowercase_model_name}.py" , f"{model_dir}/tokenization_{lowercase_model_name}_fast.py" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(lowercase : str , lowercase : str , lowercase : List[str] ):
# Create temp file
UpperCAmelCase , UpperCAmelCase = mkstemp()
UpperCAmelCase = False
with fdopen(lowercase , '''w''' ) as new_file:
with open(lowercase ) as old_file:
for line in old_file:
new_file.write(lowercase )
if line_to_copy_below in line:
UpperCAmelCase = True
for line_to_copy in lines_to_copy:
new_file.write(lowercase )
if not line_found:
raise ValueError(f"Line {line_to_copy_below} was not found in file." )
# Copy the file permissions from the old file to the new file
copymode(lowercase , lowercase )
# Remove original file
remove(lowercase )
# Move new file
move(lowercase , lowercase )
def skip_units(lowercase : List[Any] ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(lowercase : Tuple ):
with open(lowercase ) as datafile:
UpperCAmelCase = []
UpperCAmelCase = False
UpperCAmelCase = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
UpperCAmelCase = line.split('''"''' )[1]
UpperCAmelCase = skip_units(lowercase )
elif "# Below: " in line and "##" not in line:
UpperCAmelCase = line.split('''"''' )[1]
UpperCAmelCase = skip_units(lowercase )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(lowercase , lowercase , lowercase )
UpperCAmelCase = []
elif "# Replace with" in line and "##" not in line:
UpperCAmelCase = []
elif "##" not in line:
lines_to_copy.append(lowercase )
remove(lowercase )
replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py" )
os.rmdir(lowercase )
| 34
|
'''simple docstring'''
def __lowerCAmelCase ( snake_case__ ):
return "".join([hex(snake_case__ )[2:].zfill(2 ).upper() for byte in list(snake_case__ )] )
def __lowerCAmelCase ( snake_case__ ):
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(snake_case__ ) % 2) != 0:
raise ValueError(
"Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(snake_case__ ) <= set("0123456789ABCDEF" ):
raise ValueError(
"Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(snake_case__ ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 298
| 0
|
'''simple docstring'''
import os
def solution() -> int:
    with open(os.path.dirname(__file__ ) + """/p022_names.txt""" ) as file:
        names = str(file.readlines()[0] )
    names = names.replace("""\"""" , """""" ).split(""",""" )
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
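# Worked example from the problem statement: "COLIN" scores 3+15+12+9+14 = 53 and,
# as the 938th name once the list is sorted, contributes 938 * 53 = 49714.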
if __name__ == "__main__":
print(solution())
| 35
|
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
_lowerCAmelCase = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
_lowerCAmelCase = logging.getLogger()
def __lowerCAmelCase ( ):
    parser = argparse.ArgumentParser()
    parser.add_argument("-f" )
    args = parser.parse_args()
    return args.f
def __lowerCAmelCase ( snake_case__ , snake_case__="eval" ):
__UpperCamelCase : List[str] = os.path.join(snake_case__ , F"{split}_results.json" )
if os.path.exists(snake_case__ ):
with open(snake_case__ , "r" ) as f:
return json.load(snake_case__ )
raise ValueError(F"can't find {path}" )
_lowerCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A ( TestCasePlus ):
'''simple docstring'''
def a_ (self ) -> str:
__UpperCamelCase : Any = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[str] = f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_flax_glue.main()
__UpperCamelCase : int = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
@slow
def a_ (self ) -> Tuple:
__UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Any = f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_clm_flax.main()
__UpperCamelCase : Optional[int] = get_results(_UpperCAmelCase )
self.assertLess(result["eval_perplexity"] , 1_0_0 )
@slow
def a_ (self ) -> str:
__UpperCamelCase : Any = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Tuple = f"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_summarization_flax.main()
__UpperCamelCase : Tuple = get_results(_UpperCAmelCase , split="test" )
self.assertGreaterEqual(result["test_rouge1"] , 1_0 )
self.assertGreaterEqual(result["test_rouge2"] , 2 )
self.assertGreaterEqual(result["test_rougeL"] , 7 )
self.assertGreaterEqual(result["test_rougeLsum"] , 7 )
@slow
def a_ (self ) -> int:
__UpperCamelCase : int = self.get_auto_remove_tmp_dir()
__UpperCamelCase : str = f"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_mlm_flax.main()
__UpperCamelCase : Optional[Any] = get_results(_UpperCAmelCase )
self.assertLess(result["eval_perplexity"] , 4_2 )
@slow
def a_ (self ) -> Dict:
__UpperCamelCase : Dict = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Tuple = f"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_ta_mlm_flax.main()
__UpperCamelCase : Tuple = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.42 )
@slow
def a_ (self ) -> Union[str, Any]:
        # with so little data, distributed training needs more epochs to get the score on par with 0/1 GPU
__UpperCamelCase : Union[str, Any] = 7 if get_gpu_count() > 1 else 2
__UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Optional[Any] = f"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_flax_ner.main()
__UpperCamelCase : int = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertGreaterEqual(result["eval_f1"] , 0.3 )
@slow
def a_ (self ) -> List[Any]:
__UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Dict = f"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_qa.main()
__UpperCamelCase : List[Any] = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_f1"] , 3_0 )
self.assertGreaterEqual(result["eval_exact"] , 3_0 )
| 298
| 0
|
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_safs = importlib.util.find_spec("s3fs") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
COMPRESSION_FILESYSTEMS = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri( dataset_path ):
    '''simple docstring'''
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://" )[1]
    return dataset_path
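# e.g. (a sketch): extract_path_from_uri("s3://bucket/data") -> "bucket/data",
# while a bare local path such as "/tmp/data" falls through unchanged.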
def is_remote_filesystem( fs ):
    '''simple docstring'''
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def rename( fs , src , dst ):
    '''simple docstring'''
    is_local = not is_remote_filesystem(fs )
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src ) , fs._strip_protocol(dst ) )
    else:
        fs.mv(src , dst , recursive=True )
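# Usage sketch: rename(fs, "s3://bucket/a", "s3://bucket/b") delegates to fs.mv for
# remote filesystems; purely local paths instead take the cheaper shutil.move branch.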
def A ( ):
'''simple docstring'''
if hasattr(fsspec.asyn , "reset_lock" ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
_lowerCAmelCase : Any = None
_lowerCAmelCase : int = None
_lowerCAmelCase : List[Any] = threading.Lock()
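# Minimal usage sketch (hypothetical local paths, using fsspec's "file" filesystem):
#
#   fs = fsspec.filesystem("file")
#   assert extract_path_from_uri("s3://bucket/data") == "bucket/data"
#   assert not is_remote_filesystem(fs)
#   rename(fs, "/tmp/old_dir", "/tmp/new_dir")  # local move via shutil.move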
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
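        # Equivalence being asserted: decoding a single new token with `past_key_values`
        # must reproduce, at that position, the hidden state of a full forward pass over
        # the concatenated sequence — the cache changes compute cost, not results.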
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)
    def test_inputs_embeds(self):
        pass

    def test_save_load_fast_init_from_base(self):
        pass

    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    def test_retain_grad_hidden_states_attentions(self):
        # decoder cannot keep gradients
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
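# For example, key "encoder.layers.0.self_attn.linear_q" with weight_type "weight"
# walks hf_pointer down to ...encoder.layers[0].self_attn.linear_q via getattr
# (getattr on an nn.ModuleList accepts string indices) and assigns its .weight.data.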
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
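# Sketch of the "*" layer-index substitution above (standalone, hypothetical key):
#
#   name = "encoder.layers.3.self_attn.linear_q.weight"
#   key = "self_attn.linear_q"
#   layer_index = name.split(key)[0].split(".")[-2]            # -> "3"
#   "encoder.layers.*.self_attn.linear_q".replace("*", layer_index)
#   # -> "encoder.layers.3.self_attn.linear_q"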
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak the fairseq checkpoint's weights into the transformers design."""
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
_lowerCAmelCase = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias

        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
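# Conversion sanity check: for a faithful port the printed max_absolute_diff is tiny
# (the script itself notes ~1e-7); atol=1e-3 in torch.allclose is deliberately loose so
# it absorbs platform-dependent numeric noise without masking real mapping mistakes.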
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
_lowerCAmelCase = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
def topological_sort(graph):
    """Kahn's algorithm: print a topological order of the graph, or report a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
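# For the adjacency list above this prints [0, 1, 2, 3, 4, 5]; adding a back-edge
# (e.g. 5 -> 0) would leave cnt < len(graph) and print "Cycle exists" instead.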
'''simple docstring'''
def capitalize_each_alpha(txt: str) -> list:
    """
    Return every variant of `txt` with exactly one alphabetic character upper-cased.

    >>> capitalize_each_alpha("ab")
    ['Ab', 'aB']
    """
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]


if __name__ == "__main__":
    __import__("doctest").testmod()
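# Example with a non-letter in the string (hypothetical input): the digit position is
# skipped by isalpha(), so capitalize_each_alpha("a1b") == ["A1b", "a1B"].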
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=False,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class OPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))


@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)
        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(batch_out_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
'''simple docstring'''
def combination_sum_iv(n: int, array: list, target: int) -> int:
    """Count ordered combinations of elements of `array` (with repetition) summing to `target`."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list, target: int) -> int:
    """Same count, memoized over the remaining target value."""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list, target: int) -> int:
    """Iterative bottom-up variant of the same count."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
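# Worked trace of the bottom-up table for array=[1, 2, 5], target=5:
#   dp_array = [1, 1, 2, 3, 5, 9]   (each dp[i] sums dp[i - a] over items a <= i)
# so all three implementations return 9 for the sample input below.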
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase = 3
_lowerCAmelCase = 5
_lowerCAmelCase = [1, 2, 5]
print(combination_sum_iv(n, array, target))
"""simple docstring"""
def solution(n: int = 600_851_475_143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans)
if __name__ == "__main__":
print(f'''{solution() = }''')
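# For the default input this prints "solution() = 6857", the largest prime factor
# of 600_851_475_143 (= 71 * 839 * 1471 * 6857).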
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
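# Note: pytest discovers `pytest_addoption` and `pytest_terminal_summary` purely by
# name as plugin hooks, so the hook names above are load-bearing — a conftest
# function named anything else would silently never be called.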
'''simple docstring'''
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
def lowerCamelCase_ ( self: str , UpperCamelCase__: List[Any] , UpperCamelCase__: Tuple=0 ):
if str(UpperCamelCase__ ).startswith("""mps""" ):
lowerCamelCase__ : Any = torch.manual_seed(UpperCamelCase__ )
else:
lowerCamelCase__ : int = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowerCamelCase__ : Tuple = 2
lowerCamelCase__ : Optional[Any] = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=UpperCamelCase__ , device=torch.device(UpperCamelCase__ ) , )
lowerCamelCase__ : Optional[Any] = floats_tensor(control_image.shape , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowerCamelCase__ : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase__ : List[str] = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert("""RGB""" ).resize((64, 64) )
lowerCamelCase__ : Tuple = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def lowerCamelCase_ ( self: Union[str, Any] ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCamelCase_ ( self: Optional[int] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def lowerCamelCase_ ( self: Any ):
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
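        # The four runs share seed, step count, and conditioning scale; only the
        # ControlNet guidance window (control_guidance_start/end) changes, i.e. which
        # denoising steps receive ControlNet residuals — so equal outputs would signal
        # that the window arguments are not being honored.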
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(
            prompt,
            image,
            control_image=control_image,
            generator=generator,
            output_type="np",
            num_inference_steps=50,
            strength=0.6,
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )
        assert np.abs(expected_image - image).max() < 9e-2
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize=True,
        size=None,
        size_divisor=32,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        do_center_crop=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_pad=True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to BridgeTowerImageProcessor,
        assuming do_resize is set to True with a scalar size and size_divisor.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
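    # Worked example of the computation above (illustrative numbers, not part of the test):
    # for a 640x480 PIL image with size={"shortest_edge": 288} and size_divisor=32,
    # scale = 288 / 480 = 0.6, so (newh, neww) = (288, 384); max_size = int(1333 / 800 * 288) = 479,
    # so no second rescale is needed, and rounding down to multiples of 32 keeps (288, 384).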
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
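# Minimal usage sketch of the processor exercised above (a sketch, not part of the test
# suite; the constructor kwargs mirror prepare_image_processor_dict()):
#
#   processor = BridgeTowerImageProcessor(size={"shortest_edge": 288}, size_divisor=32)
#   pixel_values = processor(images=Image.new("RGB", (640, 480)), return_tensors="pt").pixel_values
#   # the spatial shape of pixel_values follows the get_expected_values() computation above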
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """
    Copy/paste/tweak the fairseq model's weights to the transformers design.
    """
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16_000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
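# Example invocation (a sketch: the script filename and all paths are placeholders):
#
#   python convert_sew_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/sew.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./sew-converted \
#       --is_finetuned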
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path, pytorch_dump_folder_path):
    """
    Convert the original Bort checkpoint (based on MXNET and Gluonnlp) to our BERT structure.
    """
    # Original Bort configuration
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1_024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1_024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"], num_layers=predefined_args["num_layers"], units=predefined_args["units"], hidden_size=predefined_args["hidden_size"], max_length=predefined_args["max_length"], num_heads=predefined_args["num_heads"], scaled=predefined_args["scaled"], dropout=predefined_args["dropout"], output_attention=False, output_all_encodings=False, use_residual=predefined_args["use_residual"], activation=predefined_args.get("activation", "gelu"), layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder, len(bort_vocab), units=predefined_args["units"], embed_size=predefined_args["embed_size"], embed_dropout=predefined_args["embed_dropout"], word_embed=predefined_args["word_embed"], use_pooler=False, use_token_type_embed=False, token_type_vocab_size=predefined_args["token_type_vocab_size"], use_classifier=False, use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )
    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both models output the same tensors")
    else:
        print("❌ The models do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
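# Example invocation (a sketch: the script filename and both paths are placeholders):
#
#   python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path /path/to/bort.params \
#       --pytorch_dump_folder_path ./bort-converted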
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21_841
    else:
        num_classes = 1_000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            # the fused timm qkv matrix is split into separate query/key/value projections
            prefix = f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swin_name''',
default='''swin_tiny_patch4_window7_224''',
type=str,
help='''Name of the Swin timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
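# Example invocation (a sketch: the script filename and dump path are placeholders; the
# model name must be a Swin variant that timm.create_model() can download):
#
#   python convert_swin_timm_to_pytorch.py \
#       --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-converted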
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            # No default supervised_keys.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            # No default supervised_keys.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
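# Minimal sketch of the pattern exercised above, using the builders defined in this file:
#
#   with tempfile.TemporaryDirectory() as tmp_cache_dir:
#       builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
#       builder.download_and_prepare()  # runs the Beam pipeline and writes the Arrow shard(s)
#       dset = builder.as_dataset()     # reloads the prepared split(s) for inspection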
"""simple docstring"""
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size, num_channels=self.num_channels, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding, patch_size=self.patch_size, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, depths=self.depths, key_dim=self.key_dim, drop_path_rate=self.drop_path_rate, mlp_ratio=self.mlp_ratio, attention_ratio=self.attention_ratio, initializer_range=self.initializer_range, down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [height * width, self.model_tester.hidden_sizes[0]],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
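# Illustrative sketch of the model exercised by the slow test above (checkpoint name comes
# from LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST; the expected logits shape is (1, 1000)):
#
#   processor = LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
#   model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
#   logits = model(**processor(images=prepare_img(), return_tensors="pt")).logits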
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser
def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")
def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
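# Example invocation once the package's console scripts are installed (the config path is
# a placeholder):
#
#   accelerate test --config_file ~/.cache/huggingface/accelerate/default_config.yaml
#
# which launches test_script.py through `accelerate-launch` as implemented above.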
| 298
| 0
|
"""simple docstring"""
from collections import deque
class Process:
    def __init__(self, process_name, arrival_time, burst_time):
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , ):
# total number of mlfq's queues
__a = number_of_queues
# time slice of queues that round robin algorithm applied
__a = time_slices
# unfinished process is in this ready_queue
__a = queue
# current time
__a = current_time
# finished process is in this sequence queue
__a = deque()
def __UpperCAmelCase ( self ):
__a = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def __UpperCAmelCase ( self , _a ):
__a = []
for i in range(len(_a ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def __UpperCAmelCase ( self , _a ):
__a = []
for i in range(len(_a ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def __UpperCAmelCase ( self , _a ):
__a = []
for i in range(len(_a ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def __UpperCAmelCase ( self , _a ):
return [q.burst_time for q in queue]
def __UpperCAmelCase ( self , _a ):
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def __UpperCAmelCase ( self , _a ):
__a = deque() # sequence deque of finished process
while len(_a ) != 0:
__a = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(_a )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
__a = 0
# set the process's turnaround time because it is finished
__a = self.current_time - cp.arrival_time
# set the completion time
__a = self.current_time
# add the process to queue that has finished queue
finished.append(_a )
self.finish_queue.extend(_a ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def __UpperCAmelCase ( self , _a , _a ):
__a = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(_a ) ):
__a = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(_a )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
__a = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(_a )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
__a = 0
# set the finish time
__a = self.current_time
# update the process' turnaround time because it is finished
__a = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(_a )
self.finish_queue.extend(_a ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def __UpperCAmelCase ( self ):
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1 ):
__a , __a = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
lowercase_ = Process("P1", 0, 5_3)
lowercase_ = Process("P2", 0, 1_7)
lowercase_ = Process("P3", 0, 6_8)
lowercase_ = Process("P4", 0, 2_4)
lowercase_ = 3
lowercase_ = [1_7, 2_5]
lowercase_ = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])})
lowercase_ = Process("P1", 0, 5_3)
lowercase_ = Process("P2", 0, 1_7)
lowercase_ = Process("P3", 0, 6_8)
lowercase_ = Process("P4", 0, 2_4)
lowercase_ = 3
lowercase_ = [1_7, 2_5]
lowercase_ = deque([Pa, Pa, Pa, Pa])
lowercase_ = MLFQ(number_of_queues, time_slices, queue, 0)
lowercase_ = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'''waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'''completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'''turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print sequence of finished processes
print(
F'''sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'''
)
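# --- Added worked example (illustrative) ------------------------------------
# A small deterministic trace of the scheduler above: two jobs, one
# round-robin level with time slice 2, then FCFS. Expected results under the
# semantics above are noted inline.
def _example_mlfq_trace():
    jobs = deque([Process("A", 0, 3), Process("B", 0, 5)])
    scheduler = MLFQ(number_of_queues=2, time_slices=[2], queue=jobs, current_time=0)
    scheduler.multi_level_feedback_queue()
    # RR slice 2: A runs 0-2, B runs 2-4; FCFS: A finishes at 5, B at 8.
    done = list(scheduler.finish_queue)
    return (
        scheduler.calculate_sequence_of_finish_queue(),   # ["A", "B"]
        scheduler.calculate_waiting_time(done),           # [2, 3]
        scheduler.calculate_turnaround_time(done),        # [5, 8]
    )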
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False
    def setUp(self ) -> List[str]:
super().setUp()
__UpperCamelCase : Optional[Any] = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
__UpperCamelCase : int = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
__UpperCamelCase : Any = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
__UpperCamelCase : int = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
__UpperCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__UpperCamelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(_UpperCAmelCase ) )
def a_ (self , **_UpperCAmelCase ) -> Dict:
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def a_ (self , _UpperCAmelCase ) -> str:
__UpperCamelCase : List[Any] = "adapt act apte"
__UpperCamelCase : Dict = "adapt act apte"
return input_text, output_text
def a_ (self ) -> int:
__UpperCamelCase : List[str] = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__UpperCamelCase : str = "adapt act apte"
__UpperCamelCase : List[str] = ["adapt", "act", "ap@@", "te"]
__UpperCamelCase : Union[str, Any] = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__UpperCamelCase : Dict = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
__UpperCamelCase : Any = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
def a_ (self ) -> int:
__UpperCamelCase : Optional[int] = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
assert tok("sam" ).input_ids == [1_3_8_4]
__UpperCamelCase : Dict = "I am a small frog."
__UpperCamelCase : Any = tok([src_text] , padding=_UpperCAmelCase , truncation=_UpperCAmelCase )["input_ids"]
__UpperCamelCase : Optional[Any] = tok.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def a_ (self ) -> List[Any]:
__UpperCamelCase : Dict = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
__UpperCamelCase : Tuple = "I am a small frog ."
__UpperCamelCase : List[str] = "."
__UpperCamelCase : Any = tok(_UpperCAmelCase )["input_ids"]
__UpperCamelCase : Optional[Any] = tok(_UpperCAmelCase )["input_ids"]
assert encoded[-1] == encoded_dot[0]
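# --- Added usage sketch (illustrative) --------------------------------------
# The round-trip behaviour the tests above rely on: BlenderbotSmall lowercases
# and splits off punctuation, so decoding never reproduces the input exactly.
def _example_blenderbot_small_roundtrip():
    from transformers import BlenderbotSmallTokenizer

    tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
    ids = tok("I am a small frog.").input_ids
    return tok.decode(ids, skip_special_tokens=True)  # "i am a small frog ."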
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlm-roberta-base": 512,
"xlm-roberta-large": 512,
"xlm-roberta-large-finetuned-conll02-dutch": 512,
"xlm-roberta-large-finetuned-conll02-spanish": 512,
"xlm-roberta-large-finetuned-conll03-english": 512,
"xlm-roberta-large-finetuned-conll03-german": 512,
}
class XLMRobertaTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
def __init__( self , lowercase , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase = None , **lowercase , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else mask_token
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowercase , eos_token=lowercase , unk_token=lowercase , sep_token=lowercase , cls_token=lowercase , pad_token=lowercase , mask_token=lowercase , sp_model_kwargs=self.sp_model_kwargs , **lowercase , )
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowercase ) )
lowerCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
lowerCAmelCase = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCAmelCase = 1
lowerCAmelCase = len(self.sp_model ) + self.fairseq_offset
lowerCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Optional[Any]:
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
lowerCAmelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , lowercase ) -> int:
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCAmelCase = {}
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _snake_case ( self , lowercase , lowercase = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
lowerCAmelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self , lowercase , lowercase = None , lowercase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase )
if token_ids_a is None:
return [1] + ([0] * len(lowercase )) + [1]
return [1] + ([0] * len(lowercase )) + [1, 1] + ([0] * len(lowercase )) + [1]
def _snake_case ( self , lowercase , lowercase = None ) -> List[int]:
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _snake_case ( self ) -> Optional[Any]:
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def _snake_case ( self ) -> str:
lowerCAmelCase = {self.convert_ids_to_tokens(lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self , lowercase ) -> List[str]:
return self.sp_model.encode(lowercase , out_type=lowercase )
def _snake_case ( self , lowercase ) -> Tuple:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCAmelCase = self.sp_model.PieceToId(lowercase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _snake_case ( self , lowercase ) -> List[str]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def _snake_case ( self , lowercase ) -> str:
        out_string = """""".join(lowercase ).replace(SPIECE_UNDERLINE , """ """ ).strip()
        return out_string
    def _snake_case ( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
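# --- Added sketch (illustrative) --------------------------------------------
# The fairseq/spm alignment documented in __init__ can be checked directly:
# ids 0-3 are the fairseq specials, and every regular piece id is the
# SentencePiece id shifted by fairseq_offset (= 1).
def _example_fairseq_offset():
    from transformers import XLMRobertaTokenizer

    tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    assert tok.convert_tokens_to_ids("<s>") == 0
    assert tok.convert_tokens_to_ids("<pad>") == 1
    assert tok.convert_tokens_to_ids("</s>") == 2
    assert tok.convert_tokens_to_ids("<unk>") == 3
    # per the alignment table above, "▁de" should come out as id 8
    return tok.convert_tokens_to_ids("▁de")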
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowerCAmelCase = logging.get_logger(__name__)
# General docstring
_lowerCAmelCase = '''RegNetConfig'''
# Base docstring
_lowerCAmelCase = '''facebook/regnet-y-040'''
_lowerCAmelCase = [1, 1088, 7, 7]
# Image classification docstring
_lowerCAmelCase = '''facebook/regnet-y-040'''
_lowerCAmelCase = '''tabby, tabby cat'''
_lowerCAmelCase = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase = 3 , _UpperCAmelCase = 1 , _UpperCAmelCase = 1 , _UpperCAmelCase = "relu" , **_UpperCAmelCase , ) -> Optional[int]:
super().__init__(**_UpperCAmelCase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
__UpperCamelCase : List[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
__UpperCamelCase : Tuple = tf.keras.layers.ConvaD(
filters=_UpperCAmelCase , kernel_size=_UpperCAmelCase , strides=_UpperCAmelCase , padding="VALID" , groups=_UpperCAmelCase , use_bias=_UpperCAmelCase , name="convolution" , )
__UpperCamelCase : int = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
__UpperCamelCase : List[str] = ACTaFN[activation] if activation is not None else tf.identity
def a_ (self , _UpperCAmelCase ) -> Dict:
__UpperCamelCase : str = self.convolution(self.padding(_UpperCAmelCase ) )
__UpperCamelCase : Dict = self.normalization(_UpperCAmelCase )
__UpperCamelCase : Dict = self.activation(_UpperCAmelCase )
return hidden_state
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , **_UpperCAmelCase ) -> Optional[Any]:
super().__init__(**_UpperCAmelCase )
__UpperCamelCase : Any = config.num_channels
__UpperCamelCase : str = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def a_ (self , _UpperCAmelCase ) -> Tuple:
__UpperCamelCase : Dict = shape_list(_UpperCAmelCase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
__UpperCamelCase : Any = tf.transpose(_UpperCAmelCase , perm=(0, 2, 3, 1) )
__UpperCamelCase : List[Any] = self.embedder(_UpperCAmelCase )
return hidden_state
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase = 2 , **_UpperCAmelCase ) -> Any:
super().__init__(**_UpperCAmelCase )
__UpperCamelCase : Any = tf.keras.layers.ConvaD(
filters=_UpperCAmelCase , kernel_size=1 , strides=_UpperCAmelCase , use_bias=_UpperCAmelCase , name="convolution" )
__UpperCamelCase : Tuple = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase = False ) -> tf.Tensor:
return self.normalization(self.convolution(_UpperCAmelCase ) , training=_UpperCAmelCase )
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) -> Any:
super().__init__(**_UpperCAmelCase )
__UpperCamelCase : List[str] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_UpperCAmelCase , name="pooler" )
__UpperCamelCase : Optional[Any] = [
tf.keras.layers.ConvaD(filters=_UpperCAmelCase , kernel_size=1 , activation="relu" , name="attention.0" ),
tf.keras.layers.ConvaD(filters=_UpperCAmelCase , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
]
def a_ (self , _UpperCAmelCase ) -> Tuple:
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
__UpperCamelCase : List[str] = self.pooler(_UpperCAmelCase )
for layer_module in self.attention:
__UpperCamelCase : str = layer_module(_UpperCAmelCase )
__UpperCamelCase : List[Any] = hidden_state * pooled
return hidden_state
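# --- Added aside (illustrative; not part of the original model) -------------
# TFRegNetSELayer above is a squeeze-and-excitation block: global-average-pool
# to (1, 1, C), squeeze through a ReLU 1x1 conv, expand back through a sigmoid
# 1x1 conv, then rescale the input channel-wise. A minimal NumPy sketch of the
# same computation (weights are placeholder arrays you would supply):
def _example_se_block(x, w_down, w_up):
    import numpy as np

    pooled = x.mean(axis=(0, 1))                     # squeeze: (C,)
    hidden = np.maximum(pooled @ w_down, 0.0)        # reduce + ReLU
    scale = 1.0 / (1.0 + np.exp(-(hidden @ w_up)))   # expand + sigmoid
    return x * scale                                 # excite: per-channel rescale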
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1 , **_UpperCAmelCase ) -> int:
super().__init__(**_UpperCAmelCase )
__UpperCamelCase : List[Any] = in_channels != out_channels or stride != 1
__UpperCamelCase : List[str] = max(1 , out_channels // config.groups_width )
__UpperCamelCase : List[Any] = (
TFRegNetShortCut(_UpperCAmelCase , stride=_UpperCAmelCase , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
__UpperCamelCase : Optional[Any] = [
TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
_UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , activation=config.hidden_act , name="layer.1" ),
TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase , name="layer.2" ),
]
__UpperCamelCase : Dict = ACTaFN[config.hidden_act]
def a_ (self , _UpperCAmelCase ) -> Union[str, Any]:
__UpperCamelCase : List[Any] = hidden_state
for layer_module in self.layers:
__UpperCamelCase : Dict = layer_module(_UpperCAmelCase )
__UpperCamelCase : List[Any] = self.shortcut(_UpperCAmelCase )
hidden_state += residual
__UpperCamelCase : Tuple = self.activation(_UpperCAmelCase )
return hidden_state
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1 , **_UpperCAmelCase ) -> Any:
super().__init__(**_UpperCAmelCase )
__UpperCamelCase : str = in_channels != out_channels or stride != 1
__UpperCamelCase : Optional[int] = max(1 , out_channels // config.groups_width )
__UpperCamelCase : Union[str, Any] = (
TFRegNetShortCut(_UpperCAmelCase , stride=_UpperCAmelCase , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
__UpperCamelCase : Union[str, Any] = [
TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
_UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(_UpperCAmelCase , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase , name="layer.3" ),
]
__UpperCamelCase : Union[str, Any] = ACTaFN[config.hidden_act]
def a_ (self , _UpperCAmelCase ) -> int:
__UpperCamelCase : str = hidden_state
for layer_module in self.layers:
__UpperCamelCase : Any = layer_module(_UpperCAmelCase )
__UpperCamelCase : Optional[Any] = self.shortcut(_UpperCAmelCase )
hidden_state += residual
__UpperCamelCase : Union[str, Any] = self.activation(_UpperCAmelCase )
return hidden_state
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , **_UpperCAmelCase ) -> int:
super().__init__(**_UpperCAmelCase )
__UpperCamelCase : List[str] = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
__UpperCamelCase : Tuple = [
# downsampling is done in the first layer with stride of 2
layer(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , name="layers.0" ),
*[layer(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , name=f"layers.{i+1}" ) for i in range(depth - 1 )],
]
def a_ (self , _UpperCAmelCase ) -> Any:
for layer_module in self.layers:
__UpperCamelCase : Dict = layer_module(_UpperCAmelCase )
return hidden_state
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , **_UpperCAmelCase ) -> str:
super().__init__(**_UpperCAmelCase )
__UpperCamelCase : Dict = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
_UpperCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
__UpperCamelCase : Union[str, Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(_UpperCAmelCase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , depth=_UpperCAmelCase , name=f"stages.{i+1}" ) )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase = False , _UpperCAmelCase = True ) -> TFBaseModelOutputWithNoAttention:
__UpperCamelCase : List[Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__UpperCamelCase : Any = hidden_states + (hidden_state,)
__UpperCamelCase : Any = stage_module(_UpperCAmelCase )
if output_hidden_states:
__UpperCamelCase : List[Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=_UpperCAmelCase , hidden_states=_UpperCAmelCase )
@keras_serializable
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
A = RegNetConfig
def __init__(self , _UpperCAmelCase , **_UpperCAmelCase ) -> List[Any]:
super().__init__(**_UpperCAmelCase )
__UpperCamelCase : Optional[int] = config
__UpperCamelCase : List[Any] = TFRegNetEmbeddings(_UpperCAmelCase , name="embedder" )
__UpperCamelCase : Union[str, Any] = TFRegNetEncoder(_UpperCAmelCase , name="encoder" )
__UpperCamelCase : Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_UpperCAmelCase , name="pooler" )
@unpack_inputs
def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
__UpperCamelCase : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCamelCase : Dict = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCamelCase : Union[str, Any] = self.embedder(_UpperCAmelCase , training=_UpperCAmelCase )
__UpperCamelCase : str = self.encoder(
_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase , training=_UpperCAmelCase )
__UpperCamelCase : List[str] = encoder_outputs[0]
__UpperCamelCase : Tuple = self.pooler(_UpperCAmelCase )
# Change to NCHW output format have uniformity in the modules
__UpperCamelCase : List[str] = tf.transpose(_UpperCAmelCase , perm=(0, 3, 1, 2) )
__UpperCamelCase : List[Any] = tf.transpose(_UpperCAmelCase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
__UpperCamelCase : List[str] = tuple([tf.transpose(_UpperCAmelCase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_UpperCAmelCase , pooler_output=_UpperCAmelCase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A = RegNetConfig
A = "regnet"
A = "pixel_values"
@property
def a_ (self ) -> List[Any]:
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )}
_lowerCAmelCase = R'''
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''
_lowerCAmelCase = R'''
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) -> Tuple:
super().__init__(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase )
__UpperCamelCase : Optional[Any] = TFRegNetMainLayer(_UpperCAmelCase , name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
__UpperCamelCase : List[str] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCamelCase : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCamelCase : Tuple = self.regnet(
pixel_values=_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase , training=_UpperCAmelCase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) -> int:
super().__init__(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase )
__UpperCamelCase : Optional[Any] = config.num_labels
__UpperCamelCase : Any = TFRegNetMainLayer(_UpperCAmelCase , name="regnet" )
# classification head
__UpperCamelCase : List[str] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def a_ (self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
__UpperCamelCase : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCamelCase : str = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCamelCase : Dict = self.regnet(
_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase , training=_UpperCAmelCase )
__UpperCamelCase : Union[str, Any] = outputs.pooler_output if return_dict else outputs[1]
__UpperCamelCase : List[str] = self.classifier[0](_UpperCAmelCase )
__UpperCamelCase : Optional[int] = self.classifier[1](_UpperCAmelCase )
__UpperCamelCase : str = None if labels is None else self.hf_compute_loss(labels=_UpperCAmelCase , logits=_UpperCAmelCase )
if not return_dict:
__UpperCamelCase : Union[str, Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=_UpperCAmelCase , logits=_UpperCAmelCase , hidden_states=outputs.hidden_states )
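# --- Added usage sketch (illustrative) --------------------------------------
# End-to-end classification with the TF RegNet head above; assumes network
# access for the "facebook/regnet-y-040" checkpoint and a placeholder image.
def _example_regnet_classification(image_path="cats.jpg"):
    from PIL import Image
    from transformers import AutoImageProcessor, TFRegNetForImageClassification

    processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
    model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    inputs = processor(images=Image.open(image_path), return_tensors="tf")
    logits = model(**inputs).logits
    return model.config.id2label[int(tf.argmax(logits, axis=-1)[0])]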
'''simple docstring'''
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2_0_4_8,
}
# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1_1_1_4_1_1_2
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0XE000
SEP = 0XE001
BOS = 0XE002
MASK = 0XE003
RESERVED = 0XE004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer( PreTrainedTokenizer ):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , bos_token=chr(CLS ) , eos_token=chr(SEP ) , sep_token=chr(SEP ) , cls_token=chr(CLS ) , pad_token=chr(PAD ) , mask_token=chr(MASK ) , add_prefix_space=False , model_max_length=2048 , **kwargs , ) -> Tuple:
        '''simple docstring'''
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , model_max_length=model_max_length , **kwargs , )
        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints )
@property
def A ( self : Dict ) -> int:
'''simple docstring'''
return self._unicode_vocab_size
def A ( self : List[Any] , _a : str ) -> List[str]:
'''simple docstring'''
return list(_a )
def A ( self : int , _a : str ) -> int:
'''simple docstring'''
try:
return ord(_a )
except TypeError:
raise ValueError(f"invalid token: '{token}'" )
def A ( self : int , _a : int ) -> str:
'''simple docstring'''
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(_a )
except TypeError:
raise ValueError(f"invalid id: {index}" )
def A ( self : Optional[int] , _a : List[Any] ) -> Tuple:
'''simple docstring'''
return "".join(_a )
def A ( self : Optional[Any] , _a : List[int] , _a : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =[self.sep_token_id]
_SCREAMING_SNAKE_CASE =[self.cls_token_id]
_SCREAMING_SNAKE_CASE =cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def A ( self : Dict , _a : List[int] , _a : Optional[List[int]] = None , _a : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
_SCREAMING_SNAKE_CASE =[1] + ([0] * len(_a )) + [1]
if token_ids_a is not None:
result += ([0] * len(_a )) + [1]
return result
def A ( self : Optional[Any] , _a : List[int] , _a : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =[self.sep_token_id]
_SCREAMING_SNAKE_CASE =[self.cls_token_id]
_SCREAMING_SNAKE_CASE =len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def A ( self : Tuple , _a : str , _a : Optional[str] = None ) -> Any:
'''simple docstring'''
return ()
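# --- Added usage sketch (illustrative) --------------------------------------
# CANINE tokenizes at the character level: every id is simply ord(char), with
# specials drawn from the private-use codepoints defined above.
def _example_canine_codepoints():
    from transformers import CanineTokenizer

    tok = CanineTokenizer.from_pretrained("google/canine-s")
    return tok("hi").input_ids  # [0xE000 (CLS), 104 ('h'), 105 ('i'), 0xE001 (SEP)]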
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy( x ):
    exp_x = torch.exp(x )
    A = torch.sum(exp_x , dim=1 )  # sum of exp(x_i)
    B = torch.sum(x * exp_x , dim=1 )  # sum of x_i * exp(x_i)
    return torch.log(A ) - B / A
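# Note on the formula above (comment added for clarity; not original): for
# logits x with softmax p_i = exp(x_i) / A, log p_i = x_i - log A, so the
# Shannon entropy is
#     H(p) = -sum_i p_i * log p_i = log A - (sum_i x_i * exp(x_i)) / A,
# i.e. exactly log(A) - B / A with A = sum_i exp(x_i), B = sum_i x_i * exp(x_i).
# Quick check: x = [0, 0] gives log(2) - 0/2 = log 2, the entropy of a
# uniform two-way distribution.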
class A ( nn.Module ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase ) -> Union[str, Any]:
super().__init__()
__UpperCamelCase : Any = config.output_attentions
__UpperCamelCase : Dict = config.output_hidden_states
__UpperCamelCase : Union[str, Any] = nn.ModuleList([BertLayer(_UpperCAmelCase ) for _ in range(config.num_hidden_layers )] )
__UpperCamelCase : Tuple = nn.ModuleList([BertHighway(_UpperCAmelCase ) for _ in range(config.num_hidden_layers )] )
__UpperCamelCase : Optional[int] = [-1 for _ in range(config.num_hidden_layers )]
def a_ (self , _UpperCAmelCase ) -> int:
if (type(_UpperCAmelCase ) is float) or (type(_UpperCAmelCase ) is int):
for i in range(len(self.early_exit_entropy ) ):
__UpperCamelCase : str = x
else:
__UpperCamelCase : List[Any] = x
    def init_highway_pooler(self , pooler ) -> str:
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name] )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ) -> List[Any]:
__UpperCamelCase : Optional[Any] = ()
__UpperCamelCase : Tuple = ()
__UpperCamelCase : Dict = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
__UpperCamelCase : Tuple = all_hidden_states + (hidden_states,)
__UpperCamelCase : Optional[int] = layer_module(
_UpperCAmelCase , _UpperCAmelCase , head_mask[i] , _UpperCAmelCase , _UpperCAmelCase )
__UpperCamelCase : Tuple = layer_outputs[0]
if self.output_attentions:
__UpperCamelCase : Optional[Any] = all_attentions + (layer_outputs[1],)
__UpperCamelCase : Any = (hidden_states,)
if self.output_hidden_states:
__UpperCamelCase : Any = current_outputs + (all_hidden_states,)
if self.output_attentions:
__UpperCamelCase : int = current_outputs + (all_attentions,)
__UpperCamelCase : Optional[int] = self.highway[i](_UpperCAmelCase )
# logits, pooled_output
if not self.training:
__UpperCamelCase : Dict = highway_exit[0]
__UpperCamelCase : Any = entropy(_UpperCAmelCase )
__UpperCamelCase : str = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
__UpperCamelCase : Optional[Any] = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
__UpperCamelCase : str = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(_UpperCAmelCase , i + 1 )
else:
__UpperCamelCase : Optional[int] = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
__UpperCamelCase : int = all_hidden_states + (hidden_states,)
__UpperCamelCase : Dict = (hidden_states,)
if self.output_hidden_states:
__UpperCamelCase : Union[str, Any] = outputs + (all_hidden_states,)
if self.output_attentions:
__UpperCamelCase : Optional[int] = outputs + (all_attentions,)
__UpperCamelCase : List[Any] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"The Bert Model transformer with early exiting (DeeBERT). " , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase ) -> Dict:
super().__init__(_UpperCAmelCase )
__UpperCamelCase : Union[str, Any] = config
__UpperCamelCase : Dict = BertEmbeddings(_UpperCAmelCase )
__UpperCamelCase : Optional[Any] = DeeBertEncoder(_UpperCAmelCase )
__UpperCamelCase : str = BertPooler(_UpperCAmelCase )
self.init_weights()
def a_ (self ) -> Any:
self.encoder.init_highway_pooler(self.pooler )
def a_ (self ) -> Optional[int]:
return self.embeddings.word_embeddings
def a_ (self , _UpperCAmelCase ) -> Dict:
__UpperCamelCase : int = value
def a_ (self , _UpperCAmelCase ) -> Tuple:
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(_UpperCAmelCase )
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
def a_ (self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ) -> Union[str, Any]:
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
elif input_ids is not None:
__UpperCamelCase : Tuple = input_ids.size()
elif inputs_embeds is not None:
__UpperCamelCase : Optional[int] = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds" )
__UpperCamelCase : List[str] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__UpperCamelCase : int = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
if encoder_attention_mask is None:
__UpperCamelCase : Tuple = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
if token_type_ids is None:
__UpperCamelCase : Optional[Any] = torch.zeros(_UpperCAmelCase , dtype=torch.long , device=_UpperCAmelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__UpperCamelCase : torch.Tensor = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
__UpperCamelCase : Tuple = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
__UpperCamelCase : Any = encoder_attention_mask[:, None, None, :]
__UpperCamelCase : List[Any] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
__UpperCamelCase : Dict = (1.0 - encoder_extended_attention_mask) * -10_000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__UpperCamelCase : Dict = self.get_head_mask(_UpperCAmelCase , self.config.num_hidden_layers )
__UpperCamelCase : Optional[int] = self.embeddings(
input_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase )
__UpperCamelCase : List[Any] = self.encoder(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
__UpperCamelCase : Union[str, Any] = encoder_outputs[0]
__UpperCamelCase : Any = self.pooler(_UpperCAmelCase )
__UpperCamelCase : Union[str, Any] = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]:
__UpperCamelCase : Tuple = message
__UpperCamelCase : Union[str, Any] = exit_layer # start from 1!
class A ( nn.Module ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase ) -> Dict:
super().__init__()
__UpperCamelCase : Union[str, Any] = BertPooler(_UpperCAmelCase )
__UpperCamelCase : int = nn.Dropout(config.hidden_dropout_prob )
__UpperCamelCase : Union[str, Any] = nn.Linear(config.hidden_size , config.num_labels )
def a_ (self , _UpperCAmelCase ) -> Any:
# Pooler
__UpperCamelCase : Optional[int] = encoder_outputs[0]
__UpperCamelCase : str = self.pooler(_UpperCAmelCase )
# "return" pooler_output
# BertModel
__UpperCamelCase : Tuple = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
__UpperCamelCase : Dict = bmodel_output[1]
__UpperCamelCase : List[Any] = self.dropout(_UpperCAmelCase )
__UpperCamelCase : Any = self.classifier(_UpperCAmelCase )
return logits, pooled_output
@add_start_docstrings(
"Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. " , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase ) -> Any:
super().__init__(_UpperCAmelCase )
__UpperCamelCase : List[Any] = config.num_labels
__UpperCamelCase : List[Any] = config.num_hidden_layers
__UpperCamelCase : Optional[int] = DeeBertModel(_UpperCAmelCase )
__UpperCamelCase : List[str] = nn.Dropout(config.hidden_dropout_prob )
__UpperCamelCase : str = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
def a_ (self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=-1 , _UpperCAmelCase=False , ) -> int:
__UpperCamelCase : int = self.num_layers
try:
__UpperCamelCase : Tuple = self.bert(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , head_mask=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
__UpperCamelCase : str = outputs[1]
__UpperCamelCase : List[Any] = self.dropout(_UpperCAmelCase )
__UpperCamelCase : Dict = self.classifier(_UpperCAmelCase )
__UpperCamelCase : Tuple = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__UpperCamelCase : int = e.message
__UpperCamelCase : Optional[Any] = e.exit_layer
__UpperCamelCase : Optional[int] = outputs[0]
if not self.training:
__UpperCamelCase : Optional[int] = entropy(_UpperCAmelCase )
__UpperCamelCase : Optional[Any] = []
__UpperCamelCase : Any = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__UpperCamelCase : List[str] = MSELoss()
__UpperCamelCase : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
__UpperCamelCase : Dict = CrossEntropyLoss()
__UpperCamelCase : Any = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
__UpperCamelCase : List[Any] = []
for highway_exit in outputs[-1]:
__UpperCamelCase : Union[str, Any] = highway_exit[0]
if not self.training:
highway_logits_all.append(_UpperCAmelCase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
__UpperCamelCase : Union[str, Any] = MSELoss()
__UpperCamelCase : str = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
__UpperCamelCase : Optional[Any] = CrossEntropyLoss()
__UpperCamelCase : List[str] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_UpperCAmelCase )
if train_highway:
__UpperCamelCase : int = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
__UpperCamelCase : Dict = (loss,) + outputs
if not self.training:
__UpperCamelCase : Optional[int] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__UpperCamelCase : int = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
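# --- Added sketch (illustrative) --------------------------------------------
# The early-exit rule used by the highway classifiers above, in isolation:
# at inference, exit at the first layer whose prediction entropy falls below
# that layer's threshold (batch size 1 assumed so the comparison is scalar).
def _example_early_exit(all_layer_logits, thresholds):
    for layer_idx, logits in enumerate(all_layer_logits):
        if entropy(logits) < thresholds[layer_idx]:
            return layer_idx + 1, logits  # exit layer is 1-based, as in HighwayException
    return len(all_layer_logits), all_layer_logits[-1]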
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
        'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
    },
    'tokenizer_file': {
        'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
        'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/fnet-base': 512,
    'google/fnet-large': 512,
}
SPIECE_UNDERLINE = '▁'
class FNetTokenizerFast(PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """token_type_ids"""]
    slow_tokenizer_class = FNetTokenizer
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__="<unk>" , UpperCamelCase__="[SEP]" , UpperCamelCase__="<pad>" , UpperCamelCase__="[CLS]" , UpperCamelCase__="[MASK]" , **UpperCamelCase__ , ) -> str:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
lowerCamelCase : Any = (
AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ , normalized=UpperCamelCase__ )
if isinstance(UpperCamelCase__ , UpperCamelCase__ )
else mask_token
)
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , **UpperCamelCase__ , )
lowerCamelCase : Optional[Any] = do_lower_case
lowerCamelCase : Union[str, Any] = remove_space
lowerCamelCase : str = keep_accents
lowerCamelCase : Optional[Any] = vocab_file
lowerCamelCase : List[Any] = False if not self.vocab_file else True
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
lowerCamelCase : int = [self.sep_token_id]
lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
lowerCamelCase : List[str] = [self.sep_token_id]
lowerCamelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
    def _lowercase ( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
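# --- Added usage sketch (illustrative) --------------------------------------
# The special-token layout produced by the methods above: single sequences
# become [CLS] A [SEP]; pairs become [CLS] A [SEP] B [SEP], with the second
# segment getting token_type_id 1.
def _example_fnet_special_tokens():
    from transformers import FNetTokenizerFast

    tok = FNetTokenizerFast.from_pretrained("google/fnet-base")
    enc = tok("hello", "world")
    return enc.input_ids, enc.token_type_ids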
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
_lowerCAmelCase = HUGGINGFACE_HUB_CACHE
_lowerCAmelCase = '''config.json'''
_lowerCAmelCase = '''diffusion_pytorch_model.bin'''
_lowerCAmelCase = '''diffusion_flax_model.msgpack'''
_lowerCAmelCase = '''model.onnx'''
_lowerCAmelCase = '''diffusion_pytorch_model.safetensors'''
_lowerCAmelCase = '''weights.pb'''
_lowerCAmelCase = '''https://huggingface.co'''
_lowerCAmelCase = default_cache_path
_lowerCAmelCase = '''diffusers_modules'''
_lowerCAmelCase = os.getenv('''HF_MODULES_CACHE''', os.path.join(hf_cache_home, '''modules'''))
_lowerCAmelCase = ['''fp16''', '''non-ema''']
_lowerCAmelCase = '''.self_attn'''
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any( num , base ):
    if isinstance(num , float ):
        raise TypeError('''int() can\'t convert non-string with explicit base''' )
    if num < 0:
        raise ValueError('''parameter must be positive int''' )
    if isinstance(num , str ):
        raise TypeError('''\'str\' object cannot be interpreted as an integer''' )
    if isinstance(base , float ):
        raise TypeError('''\'float\' object cannot be interpreted as an integer''' )
    if base in (0, 1):
        raise ValueError('''base must be >= 2''' )
    if base > 36:
        raise ValueError('''base must be <= 36''' )
    new_value = ''''''
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num , base )
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod )]
        else:
            actual_value = str(mod )
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1] )
        elif div == 1:
            new_value += str(num )
            return str(new_value[::-1] )
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
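# --- Added worked example (illustrative) ------------------------------------
# decimal_to_any(255, 16) walks 255 -> divmod(255, 16) = (15, 15) -> "F",
# then 15 -> divmod(15, 16) = (0, 15) -> "F", and reverses to "FF".
def _example_decimal_to_any():
    assert decimal_to_any(255, 16) == "FF"
    assert decimal_to_any(5, 2) == "101"
    assert decimal_to_any(35, 36) == "Z"
    return decimal_to_any(4096, 16)  # "1000"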
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class A :
'''simple docstring'''
    def __init__(self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> Dict:
        self.parent = parent
        self.batch_size = 1_3
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 9_9
        self.hidden_size = 3_8_4
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 3_7
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 5_1_2
        self.type_vocab_size = 1_6
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 1_2_8
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                # ConvBERT uses a head_ratio of 2, so only half the attention heads remain
                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            # ConvBERT uses a head_ratio of 2, so only half the attention heads remain
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 298
| 0
|
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith('module.encoder'):
            key = key.replace('module.encoder', 'glpn.encoder')
        if key.startswith('module.decoder'):
            key = key.replace('module.decoder', 'decoder.stages')
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find('patch_embed') + len('patch_embed')]
            key = key.replace(f"""patch_embed{idx}""", f"""patch_embeddings.{int(idx)-1}""")
        if "norm" in key:
            key = key.replace('norm', 'layer_norm')
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find('glpn.encoder.layer_norm') + len('glpn.encoder.layer_norm')]
            key = key.replace(f"""layer_norm{idx}""", f"""layer_norm.{int(idx)-1}""")
        if "layer_norm1" in key:
            key = key.replace('layer_norm1', 'layer_norm_1')
        if "layer_norm2" in key:
            key = key.replace('layer_norm2', 'layer_norm_2')
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find('block') + len('block')]
            key = key.replace(f"""block{idx}""", f"""block.{int(idx)-1}""")
        if "attn.q" in key:
            key = key.replace('attn.q', 'attention.self.query')
        if "attn.proj" in key:
            key = key.replace('attn.proj', 'attention.output.dense')
        if "attn" in key:
            key = key.replace('attn', 'attention.self')
        if "fc1" in key:
            key = key.replace('fc1', 'dense1')
        if "fc2" in key:
            key = key.replace('fc2', 'dense2')
        if "linear_pred" in key:
            key = key.replace('linear_pred', 'classifier')
        if "linear_fuse" in key:
            key = key.replace('linear_fuse.conv', 'linear_fuse')
            key = key.replace('linear_fuse.bn', 'batch_norm')
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find('linear_c') + len('linear_c')]
            key = key.replace(f"""linear_c{idx}""", f"""linear_c.{int(idx)-1}""")
        if "bot_conv" in key:
            key = key.replace('bot_conv', '0.convolution')
        if "skip_conv1" in key:
            key = key.replace('skip_conv1', '1.convolution')
        if "skip_conv2" in key:
            key = key.replace('skip_conv2', '2.convolution')
        if "fusion1" in key:
            key = key.replace('fusion1', '1.fusion')
        if "fusion2" in key:
            key = key.replace('fusion2', '2.fusion')
        if "fusion3" in key:
            key = key.replace('fusion3', '3.fusion')
        if "fusion" in key and "conv" in key:
            key = key.replace('conv', 'convolutional_layer')
        if key.startswith('module.last_layer_depth'):
            key = key.replace('module.last_layer_depth', 'head.head')
        new_state_dict[key] = value

    return new_state_dict
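# Example mapping (illustrative): "module.encoder.patch_embed1.proj.weight"
# becomes "glpn.encoder.patch_embeddings.0.proj.weight" after the prefix and
# index rewrites above.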
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""")
            kv_bias = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"""glpn.encoder.block.{i}.{j}.attention.self.key.weight"""] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"""glpn.encoder.block.{i}.{j}.attention.self.key.bias"""] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"""glpn.encoder.block.{i}.{j}.attention.self.value.weight"""] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"""glpn.encoder.block.{i}.{j}.attention.self.value.bias"""] = kv_bias[config.hidden_sizes[i] :]
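# The fused kv matrix has shape (2 * hidden_size, hidden_size): the first
# hidden_size rows become the key projection and the remaining rows the value
# projection, so the slices above split it exactly in half.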
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    # load GLPN configuration (Segformer-B4 size)
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='pt').pixel_values

    logger.info('Converting model...')

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]])
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]])
        else:
            raise ValueError(f"""Unknown model name: {model_name}""")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print('Looks ok!')

    # finally, push to hub if required
    if push_to_hub:
        logger.info('Pushing model and image processor to the hub...')

        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add model', use_temp_dir=True, )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add image processor', use_temp_dir=True, )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 50
|
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
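# Illustrative use (assumes torchvision is installed): running
#   Tracker(torchvision.models.resnet18())(torch.randn(1, 3, 224, 224)).parametrized
# yields the conv/batch-norm/linear leaves in the order their forward hooks fired.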
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        # Trace both modules with the same input and copy weights op by op.
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}.")

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transferred from={src_m} to={dest_m}")
def convert_weight_and_push(name, config, save_directory, push_to_hub=True):
    print(F"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = F"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add model", use_temp_dir=True, )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add image processor", use_temp_dir=True, )
        print(F"Pushed {checkpoint_name}")
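# Naming example (illustrative): for name == "resnet50",
# "resnet50".split("resnet") == ["", "50"], so the pushed checkpoint is
# called "resnet-50".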
def convert_weights_and_push(save_directory, model_name=None, push_to_hub=True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1_000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
"resnet18": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
"resnet26": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
"resnet34": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
"resnet50": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
"resnet101": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
"resnet152": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
}
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 298
| 0
|
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'''files''' , [
['''full:README.md''', '''dataset_infos.json'''],
['''empty:README.md''', '''dataset_infos.json'''],
['''dataset_infos.json'''],
['''full:README.md'''],
] , )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'''dataset_info''' , [
DatasetInfo(),
DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_load(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo", citation="bar", homepage="https://foo.bar", license="CC0", features=Features({"a": Value("int32")}), post_processed={}, supervised_keys=(), task_templates=[], builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train", "num_examples": 42}], download_checksums={}, download_size=1337, post_processing_size=442, dataset_size=1234, size_in_bytes=1337 + 442 + 1234, )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'''dataset_infos_dict''' , [
DatasetInfosDict(),
DatasetInfosDict({'''default''': DatasetInfo()} ),
DatasetInfosDict({'''my_config_name''': DatasetInfo()} ),
DatasetInfosDict(
{
'''default''': DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'''v1''': DatasetInfo(dataset_size=42 ),
'''v2''': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_load(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
| 51
|
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(F"can't find {path}")
    return results


def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Optional[int]:
__UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n ".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
__UpperCamelCase : Tuple = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "glue_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Dict:
__UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n ".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
__UpperCamelCase : int = get_results(_UpperCAmelCase )
self.assertLess(result["perplexity"] , 1_0_0 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "clm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Any:
__UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Optional[Any] = f"\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : Optional[Any] = get_results(_UpperCAmelCase )
self.assertLess(result["perplexity"] , 4_2 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "mlm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> int:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
__UpperCamelCase : int = 7 if get_gpu_count() > 1 else 2
__UpperCamelCase : int = self.get_auto_remove_tmp_dir()
__UpperCamelCase : str = f"\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : List[Any] = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertLess(result["train_loss"] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "ner_no_trainer" ) ) )
@unittest.skip(reason="Fix me @muellerzr" )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Any:
__UpperCamelCase : Tuple = self.get_auto_remove_tmp_dir()
__UpperCamelCase : str = f"\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : Optional[int] = get_results(_UpperCAmelCase )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["eval_f1"] , 2_8 )
self.assertGreaterEqual(result["eval_exact"] , 2_8 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "qa_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Dict:
__UpperCamelCase : Tuple = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[str] = f"\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : Tuple = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "swag_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Union[str, Any]:
__UpperCamelCase : str = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Dict = f"\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : Dict = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_rouge1"] , 1_0 )
self.assertGreaterEqual(result["eval_rouge2"] , 2 )
self.assertGreaterEqual(result["eval_rougeL"] , 7 )
self.assertGreaterEqual(result["eval_rougeLsum"] , 7 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "summarization_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Tuple:
__UpperCamelCase : Optional[int] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : List[Any] = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_bleu"] , 3_0 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "translation_no_trainer" ) ) )
@slow
def a_ (self ) -> List[Any]:
__UpperCamelCase : Tuple = logging.StreamHandler(sys.stdout )
logger.addHandler(_UpperCAmelCase )
__UpperCamelCase : Dict = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : Optional[int] = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10 )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Tuple:
__UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Optional[Any] = f"\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n ".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
__UpperCamelCase : str = get_results(_UpperCAmelCase )
# The base model scores a 25%
self.assertGreaterEqual(result["eval_accuracy"] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "step_1" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "image_classification_no_trainer" ) ) )
| 298
| 0
|
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps,
    embedding_dim,
    freq_shift = 1,
    min_timescale = 1,
    max_timescale = 1.0e4,
    flip_sin_to_cos = False,
    scale = 1.0,
) -> jnp.ndarray:
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, F"""Embedding dimension {embedding_dim} should be even"""
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift)
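# Illustrative shapes: get_sinusoidal_embeddings(jnp.arange(4), embedding_dim=8)
# returns a (4, 8) array whose first 4 columns are sines and last 4 are cosines
# (order flipped when flip_sin_to_cos=True).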
| 52
|
'''simple docstring'''
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """Return number + 2 if number and number + 2 are both prime, otherwise -1."""
    if not isinstance(number, int):
        msg = F"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
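# Examples (illustrative): twin_prime(5) == 7 because 5 and 7 are both prime,
# while twin_prime(4) == -1 because 4 is composite.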
if __name__ == "__main__":
import doctest
doctest.testmod()
| 298
| 0
|
'''simple docstring'''
from math import factorial
class Dual:
    """A dual number: a real part plus one or more infinitesimal (dual) components."""

    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual)+'E'+str(n+1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError('power must be a positive integer')
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x
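# Illustrative arithmetic: Dual(3, 1) represents 3 + 1*E1, and Dual(3, 1) ** 2
# has real part 9 and first dual component 6, mirroring d/dx x**2 = 2x at x = 3.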
def differentiate(func, position, order):
    """Differentiate `func` at `position` to the given `order` using dual numbers."""
    if not callable(func):
        raise ValueError('differentiate() requires a function as input for func')
    if not isinstance(position, (float, int)):
        raise ValueError('differentiate() requires a float as input for position')
    if not isinstance(order, int):
        raise ValueError('differentiate() requires an int as input for order')
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)
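# Example (illustrative): differentiate(lambda x: x ** 2, 3, 1) == 6 and
# differentiate(lambda x: x ** 2, 3, 2) == 2, matching the analytic derivatives.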
if __name__ == "__main__":
import doctest
doctest.testmod()
def f(y):
    return y**2 * y**4


print(differentiate(f, 9, 2))
| 53
|
'''simple docstring'''
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_a = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_a ** (1 / 2)
        return hubble
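# This evaluates the Friedmann equation H(z) = H0 * sqrt(Omega_r (1+z)^4
# + Omega_m (1+z)^3 + Omega_k (1+z)^2 + Omega_Lambda); at z = 0 the density
# parameters (including curvature) sum to 1, so the function returns H0 itself.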
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
_lowerCAmelCase = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 298
| 0
|
"""simple docstring"""
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
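# Illustrative environment (hypothetical values): SageMaker sets something like
# SM_HP_MP_PARAMETERS='{"partitions": 2, "microbatches": 4}', so the check above
# passes only when a "partitions" entry is present and smdistributed is installed.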
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="", metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"}, )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.", FutureWarning, )
    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch")
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device
    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
| 54
|
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = '''src/transformers'''
PATH_TO_TASK_GUIDES = '''docs/source/en/tasks'''
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
_lowerCAmelCase = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def __lowerCAmelCase ( snake_case__ ):
__UpperCamelCase : Optional[Any] = TASK_GUIDE_TO_MODELS[task_guide]
__UpperCamelCase : str = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(snake_case__ , set() )
__UpperCamelCase : Union[str, Any] = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F"[{name}](../model_doc/{code})" for code, name in model_names.items()] ) + "\n"
def __lowerCAmelCase ( snake_case__ , snake_case__=False ):
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Union[str, Any] = _find_text_in_file(
filename=os.path.join(snake_case__ , snake_case__ ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , )
__UpperCamelCase : List[str] = get_model_list_for_task(snake_case__ )
if current_list != new_list:
if overwrite:
with open(os.path.join(snake_case__ , snake_case__ ) , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
" to fix this." )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
_lowerCAmelCase = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
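# A hedged sketch of the prompt-delimited auto-generation pattern this script relies
# on: everything between the two HTML comments is machine-owned and regenerated in
# place by `make fix-copies`. The helper name follows the original
# utils/check_task_guides.py; the file path is illustrative.
current_list, start_index, end_index, lines = _find_text_in_file(
    filename="docs/source/en/tasks/summarization.md",
    start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
    end_prompt="<!--End of the generated tip-->",
)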
| 298
| 0
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
a_ : Optional[Any] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
a_ : Optional[int] = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
a_ : Dict = {
"""unc-nlp/lxmert-base-uncased""": 512,
}
a_ : Optional[int] = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class snake_case ( lowercase ):
"""simple docstring"""
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_INIT_CONFIGURATION
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = LxmertTokenizer
def __init__( self , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=True , UpperCamelCase="[UNK]" , UpperCamelCase="[SEP]" , UpperCamelCase="[PAD]" , UpperCamelCase="[CLS]" , UpperCamelCase="[MASK]" , UpperCamelCase=True , UpperCamelCase=None , **UpperCamelCase , ):
"""simple docstring"""
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , )
lowerCamelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , UpperCamelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , UpperCamelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , UpperCamelCase ) != tokenize_chinese_chars
):
lowerCamelCase_ = getattr(UpperCamelCase , normalizer_state.pop("type" ) )
lowerCamelCase_ = do_lower_case
lowerCamelCase_ = strip_accents
lowerCamelCase_ = tokenize_chinese_chars
lowerCamelCase_ = normalizer_class(**UpperCamelCase )
lowerCamelCase_ = do_lower_case
def snake_case ( self , UpperCamelCase , UpperCamelCase=None ):
"""simple docstring"""
lowerCamelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case ( self , UpperCamelCase , UpperCamelCase = None ):
"""simple docstring"""
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case ( self , UpperCamelCase , UpperCamelCase = None ):
"""simple docstring"""
lowerCamelCase_ = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
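# Hedged usage sketch for the fast tokenizer above; the checkpoint name comes from the
# PRETRAINED_VOCAB_FILES_MAP keys and loading it requires network access or a cache.
from transformers import LxmertTokenizerFast

tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
encoded = tokenizer("Where is the cat?", "On the mat.")
# token_type_ids mark the first segment (plus [CLS]/[SEP]) with 0s and the second
# segment with 1s, matching the sequence-pair method above.
print(encoded["input_ids"])
print(encoded["token_type_ids"])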
| 55
|
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A = ["image_processor", "tokenizer"]
A = "OwlViTImageProcessor"
A = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__(self , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase ) -> str:
__UpperCamelCase : Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , _UpperCAmelCase , )
__UpperCamelCase : str = kwargs.pop("feature_extractor" )
__UpperCamelCase : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__(self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="max_length" , _UpperCAmelCase="np" , **_UpperCAmelCase ) -> str:
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) or (isinstance(_UpperCAmelCase , _UpperCAmelCase ) and not isinstance(text[0] , _UpperCAmelCase )):
__UpperCamelCase : Tuple = [self.tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )]
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(text[0] , _UpperCAmelCase ):
__UpperCamelCase : List[str] = []
# Maximum number of queries across batch
__UpperCamelCase : List[str] = max([len(_UpperCAmelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(_UpperCAmelCase ) != max_num_queries:
__UpperCamelCase : Any = t + [" "] * (max_num_queries - len(_UpperCAmelCase ))
__UpperCamelCase : int = self.tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
encodings.append(_UpperCAmelCase )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
__UpperCamelCase : List[str] = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
__UpperCamelCase : int = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__UpperCamelCase : Tuple = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
__UpperCamelCase : Optional[Any] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__UpperCamelCase : Any = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
__UpperCamelCase : List[Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__UpperCamelCase : Any = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
__UpperCamelCase : Optional[Any] = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
__UpperCamelCase : Optional[Any] = BatchEncoding()
__UpperCamelCase : Union[str, Any] = input_ids
__UpperCamelCase : List[str] = attention_mask
if query_images is not None:
__UpperCamelCase : str = BatchEncoding()
__UpperCamelCase : Any = self.image_processor(
_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ).pixel_values
__UpperCamelCase : List[Any] = query_pixel_values
if images is not None:
__UpperCamelCase : Dict = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if text is not None and images is not None:
__UpperCamelCase : Optional[Any] = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__UpperCamelCase : Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]:
return self.image_processor.post_process(*_UpperCAmelCase , **_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> List[str]:
return self.image_processor.post_process_object_detection(*_UpperCAmelCase , **_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]:
return self.image_processor.post_process_image_guided_detection(*_UpperCAmelCase , **_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> int:
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def a_ (self ) -> Tuple:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _UpperCAmelCase , )
return self.image_processor_class
@property
def a_ (self ) -> Union[str, Any]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _UpperCAmelCase , )
return self.image_processor
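# Hedged usage sketch for the processor above: per-image text queries are padded to
# the longest query list, and the image is resized/normalized by the OwlViT image
# processor. Checkpoint name and image URL are illustrative.
import requests
from PIL import Image
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
print(inputs["input_ids"].shape, inputs["pixel_values"].shape)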
| 298
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a : int = logging.get_logger(__name__)
class a ( _lowerCamelCase , _lowerCamelCase ):
snake_case_ = "maskformer-swin"
snake_case_ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Dict , lowercase_ : List[str]=224 , lowercase_ : List[str]=4 , lowercase_ : List[str]=3 , lowercase_ : Tuple=96 , lowercase_ : Any=[2, 2, 6, 2] , lowercase_ : Any=[3, 6, 12, 24] , lowercase_ : Any=7 , lowercase_ : int=4.0 , lowercase_ : str=True , lowercase_ : List[str]=0.0 , lowercase_ : int=0.0 , lowercase_ : Optional[Any]=0.1 , lowercase_ : Optional[int]="gelu" , lowercase_ : Optional[Any]=False , lowercase_ : Tuple=0.02 , lowercase_ : int=1e-5 , lowercase_ : Any=None , lowercase_ : int=None , **lowercase_ : Tuple , ):
super().__init__(**lowercase_ )
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = embed_dim
snake_case_ = depths
snake_case_ = len(lowercase_ )
snake_case_ = num_heads
snake_case_ = window_size
snake_case_ = mlp_ratio
snake_case_ = qkv_bias
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = drop_path_rate
snake_case_ = hidden_act
snake_case_ = use_absolute_embeddings
snake_case_ = layer_norm_eps
snake_case_ = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case_ = int(embed_dim * 2 ** (len(lowercase_ ) - 1) )
snake_case_ = ['''stem'''] + [F"stage{idx}" for idx in range(1 , len(lowercase_ ) + 1 )]
snake_case_ ,snake_case_ = get_aligned_output_features_output_indices(
out_features=lowercase_ , out_indices=lowercase_ , stage_names=self.stage_names )
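# Quick, hedged sanity check against the original MaskFormerSwinConfig (which this
# snippet mirrors): the channel dimension after the last stage is
# embed_dim * 2 ** (len(depths) - 1), and the stage names run from "stem" to "stage4".
from transformers import MaskFormerSwinConfig

config = MaskFormerSwinConfig(embed_dim=96, depths=[2, 2, 6, 2])
assert config.hidden_size == 96 * 2**3  # 768
assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]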
| 56
|
'''simple docstring'''
def __lowerCAmelCase ( snake_case__ ):
    # hex() each byte, strip the "0x" prefix, pad to two hex digits, upper-case (RFC 3548)
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(snake_case__ )] )
def __lowerCAmelCase ( data ):
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data ) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set("0123456789ABCDEF" ):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
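# The standard library implements the same RFC 3548 base16 round trip; a quick
# cross-check against the hand-rolled helpers above:
import base64

assert base64.b16encode(b"Hello") == b"48656C6C6F"
assert base64.b16decode("48656C6C6F") == b"Hello"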
| 298
| 0
|
"""simple docstring"""
import requests
from bs4 import BeautifulSoup
def stock_price ( symbol = "AAPL" ):
    '''simple docstring'''
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url ).text , "html.parser" )
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div" , class_=class_ ).find("span" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
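# A defensive variant (illustrative, not from the original): Yahoo's markup changes
# often, so guard against a missing node instead of letting .find(...) raise.
def stock_price_safe(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url, timeout=10).text, "html.parser")
    div = soup.find("div", class_="My(6px) Pos(r) smartphone_Mt(6px)")
    span = div.find("span") if div else None
    return span.text if span else "N/A"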
| 57
|
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
_lowerCAmelCase = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
_lowerCAmelCase = logging.getLogger()
def __lowerCAmelCase ( ):
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument("-f" )
__UpperCamelCase : Optional[Any] = parser.parse_args()
return args.f
def __lowerCAmelCase ( snake_case__ , snake_case__="eval" ):
__UpperCamelCase : List[str] = os.path.join(snake_case__ , F"{split}_results.json" )
if os.path.exists(snake_case__ ):
with open(snake_case__ , "r" ) as f:
return json.load(snake_case__ )
raise ValueError(F"can't find {path}" )
_lowerCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def a_ (self ) -> str:
__UpperCamelCase : Any = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[str] = f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_flax_glue.main()
__UpperCamelCase : int = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
@slow
def a_ (self ) -> Tuple:
__UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Any = f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_clm_flax.main()
__UpperCamelCase : Optional[int] = get_results(_UpperCAmelCase )
self.assertLess(result["eval_perplexity"] , 1_0_0 )
@slow
def a_ (self ) -> str:
__UpperCamelCase : Any = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Tuple = f"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_summarization_flax.main()
__UpperCamelCase : Tuple = get_results(_UpperCAmelCase , split="test" )
self.assertGreaterEqual(result["test_rouge1"] , 1_0 )
self.assertGreaterEqual(result["test_rouge2"] , 2 )
self.assertGreaterEqual(result["test_rougeL"] , 7 )
self.assertGreaterEqual(result["test_rougeLsum"] , 7 )
@slow
def a_ (self ) -> int:
__UpperCamelCase : int = self.get_auto_remove_tmp_dir()
__UpperCamelCase : str = f"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_mlm_flax.main()
__UpperCamelCase : Optional[Any] = get_results(_UpperCAmelCase )
self.assertLess(result["eval_perplexity"] , 4_2 )
@slow
def a_ (self ) -> Dict:
__UpperCamelCase : Dict = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Tuple = f"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_ta_mlm_flax.main()
__UpperCamelCase : Tuple = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.42 )
@slow
def a_ (self ) -> Union[str, Any]:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
__UpperCamelCase : Union[str, Any] = 7 if get_gpu_count() > 1 else 2
__UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Optional[Any] = f"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_flax_ner.main()
__UpperCamelCase : int = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertGreaterEqual(result["eval_f1"] , 0.3 )
@slow
def a_ (self ) -> List[Any]:
__UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Dict = f"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_qa.main()
__UpperCamelCase : List[Any] = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_f1"] , 3_0 )
self.assertGreaterEqual(result["eval_exact"] , 3_0 )
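# A minimal sketch of the argv-patching pattern these tests rely on: build the CLI
# string, split it into an argv list, patch sys.argv, and call the example script's
# main(). The flags shown are illustrative.
import sys
from unittest.mock import patch

testargs = (
    "run_flax_glue.py --model_name_or_path distilbert-base-uncased --output_dir /tmp/out"
).split()
with patch.object(sys, "argv", testargs):
    run_flax_glue.main()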
| 298
| 0
|
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a_ :
'''simple docstring'''
def __init__( self , A , A=13 , A=30 , A=2 , A=3 , A=True , A=True , A=32 , A=2 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=10 , A=0.02 , A=3 , A=0.6 , A=None , ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = image_size
_SCREAMING_SNAKE_CASE = patch_size
_SCREAMING_SNAKE_CASE = num_channels
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = mask_ratio
_SCREAMING_SNAKE_CASE = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
_SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2
_SCREAMING_SNAKE_CASE = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def snake_case_( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def snake_case_( self ) -> Optional[Any]:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def snake_case_( self , A , A , A ) -> List[str]:
_SCREAMING_SNAKE_CASE = TFViTMAEModel(config=A )
_SCREAMING_SNAKE_CASE = model(A , training=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_( self , A , A , A ) -> List[Any]:
_SCREAMING_SNAKE_CASE = TFViTMAEForPreTraining(A )
_SCREAMING_SNAKE_CASE = model(A , training=A )
# expected sequence length = num_patches
_SCREAMING_SNAKE_CASE = (self.image_size // self.patch_size) ** 2
_SCREAMING_SNAKE_CASE = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_SCREAMING_SNAKE_CASE = 1
_SCREAMING_SNAKE_CASE = TFViTMAEForPreTraining(A )
_SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE = model(A , training=A )
_SCREAMING_SNAKE_CASE = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def snake_case_( self ) -> int:
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
((_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE)) = config_and_inputs
_SCREAMING_SNAKE_CASE = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
UpperCamelCase = {'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {}
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def snake_case_( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = TFViTMAEModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=A , has_text_modality=A , hidden_size=37 )
def snake_case_( self ) -> str:
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def snake_case_( self ) -> str:
pass
def snake_case_( self ) -> int:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(A )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A , tf.keras.layers.Layer ) )
def snake_case_( self ) -> List[str]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(A )
_SCREAMING_SNAKE_CASE = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A )
def snake_case_( self ) -> int:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def snake_case_( self ) -> Dict:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*A )
def snake_case_( self ) -> List[Any]:
# make the mask reproducible
np.random.seed(2 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE = int((config.image_size // config.patch_size) ** 2 )
_SCREAMING_SNAKE_CASE = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(A )
_SCREAMING_SNAKE_CASE = self._prepare_for_class(A , A )
_SCREAMING_SNAKE_CASE = model(A , noise=A )
_SCREAMING_SNAKE_CASE = copy.deepcopy(self._prepare_for_class(A , A ) )
_SCREAMING_SNAKE_CASE = model(**A , noise=A )
_SCREAMING_SNAKE_CASE = outputs_dict[0].numpy()
_SCREAMING_SNAKE_CASE = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def snake_case_( self ) -> List[Any]:
# make the mask reproducible
np.random.seed(2 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE = int((config.image_size // config.patch_size) ** 2 )
_SCREAMING_SNAKE_CASE = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(A ):
_SCREAMING_SNAKE_CASE = {}
for k, v in inputs_dict.items():
if tf.is_tensor(A ):
_SCREAMING_SNAKE_CASE = v.numpy()
else:
_SCREAMING_SNAKE_CASE = np.array(A )
return inputs_np_dict
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(A )
_SCREAMING_SNAKE_CASE = self._prepare_for_class(A , A )
_SCREAMING_SNAKE_CASE = prepare_numpy_arrays(A )
_SCREAMING_SNAKE_CASE = model(A , noise=A )
_SCREAMING_SNAKE_CASE = model(**A , noise=A )
self.assert_outputs_same(A , A )
def snake_case_( self , A , A , A ) -> List[Any]:
# make masks reproducible
np.random.seed(2 )
_SCREAMING_SNAKE_CASE = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
_SCREAMING_SNAKE_CASE = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_SCREAMING_SNAKE_CASE = tf.constant(A )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_SCREAMING_SNAKE_CASE = tf_noise
super().check_pt_tf_models(A , A , A )
def snake_case_( self ) -> Optional[int]:
# make mask reproducible
np.random.seed(2 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(A )
if module_member_name.endswith("""MainLayer""" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )]
for module_member in (getattr(A , A ),)
if isinstance(A , A )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(A , """_keras_serializable""" , A )
}
_SCREAMING_SNAKE_CASE = int((config.image_size // config.patch_size) ** 2 )
_SCREAMING_SNAKE_CASE = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_SCREAMING_SNAKE_CASE = tf.convert_to_tensor(A )
inputs_dict.update({"""noise""": noise} )
for main_layer_class in tf_main_layer_classes:
_SCREAMING_SNAKE_CASE = main_layer_class(A )
_SCREAMING_SNAKE_CASE = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
_SCREAMING_SNAKE_CASE = tf.keras.Model(A , outputs=main_layer(A ) )
_SCREAMING_SNAKE_CASE = model(A )
with tempfile.TemporaryDirectory() as tmpdirname:
_SCREAMING_SNAKE_CASE = os.path.join(A , """keras_model.h5""" )
model.save(A )
_SCREAMING_SNAKE_CASE = tf.keras.models.load_model(
A , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(A , tf.keras.Model )
_SCREAMING_SNAKE_CASE = model(A )
self.assert_outputs_same(A , A )
@slow
def snake_case_( self ) -> Tuple:
# make mask reproducible
np.random.seed(2 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE = int((config.image_size // config.patch_size) ** 2 )
_SCREAMING_SNAKE_CASE = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(A )
_SCREAMING_SNAKE_CASE = self._prepare_for_class(A , A )
_SCREAMING_SNAKE_CASE = model(A , noise=A )
if model_class.__name__ == "TFViTMAEModel":
_SCREAMING_SNAKE_CASE = outputs.last_hidden_state.numpy()
_SCREAMING_SNAKE_CASE = 0
else:
_SCREAMING_SNAKE_CASE = outputs.logits.numpy()
_SCREAMING_SNAKE_CASE = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(A , saved_model=A )
_SCREAMING_SNAKE_CASE = model_class.from_pretrained(A )
_SCREAMING_SNAKE_CASE = model(A , noise=A )
if model_class.__name__ == "TFViTMAEModel":
_SCREAMING_SNAKE_CASE = after_outputs["""last_hidden_state"""].numpy()
_SCREAMING_SNAKE_CASE = 0
else:
_SCREAMING_SNAKE_CASE = after_outputs["""logits"""].numpy()
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(A , 1e-5 )
def snake_case_( self ) -> Dict:
# make mask reproducible
np.random.seed(2 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE = int((config.image_size // config.patch_size) ** 2 )
_SCREAMING_SNAKE_CASE = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(A )
_SCREAMING_SNAKE_CASE = self._prepare_for_class(A , A )
_SCREAMING_SNAKE_CASE = model(A , noise=A )
_SCREAMING_SNAKE_CASE = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(A )
_SCREAMING_SNAKE_CASE = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
_SCREAMING_SNAKE_CASE = model_class.from_config(model.config )
_SCREAMING_SNAKE_CASE = new_model(A ) # Build model
new_model.set_weights(model.get_weights() )
_SCREAMING_SNAKE_CASE = new_model(A , noise=A )
self.assert_outputs_same(A , A )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def snake_case_( self ) -> Optional[Any]:
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def snake_case_( self ) -> Optional[Any]:
pass
@slow
def snake_case_( self ) -> str:
_SCREAMING_SNAKE_CASE = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(A )
def lowerCamelCase ( ) ->Any:
_SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class a_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case_( self ) -> Optional[Any]:
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def snake_case_( self ) -> List[str]:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
_SCREAMING_SNAKE_CASE = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" )
_SCREAMING_SNAKE_CASE = self.default_image_processor
_SCREAMING_SNAKE_CASE = prepare_img()
_SCREAMING_SNAKE_CASE = image_processor(images=A , return_tensors="""tf""" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_SCREAMING_SNAKE_CASE = ViTMAEConfig()
_SCREAMING_SNAKE_CASE = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_SCREAMING_SNAKE_CASE = np.random.uniform(size=(1, num_patches) )
# forward pass
_SCREAMING_SNAKE_CASE = model(**A , noise=A )
# verify the logits
_SCREAMING_SNAKE_CASE = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , A )
_SCREAMING_SNAKE_CASE = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , A , atol=1e-4 )
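# Quick check of the sequence-length arithmetic the tester above depends on: after
# random masking, ViTMAE keeps ceil((1 - mask_ratio) * (num_patches + 1)) tokens
# (the +1 accounts for the [CLS] token). Values match the tester defaults.
import math

image_size, patch_size, mask_ratio = 30, 2, 0.6
num_patches = (image_size // patch_size) ** 2  # 15 ** 2 = 225
seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
assert seq_length == 91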
| 58
|
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class A :
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase=9_9 , _UpperCAmelCase=1_3 , _UpperCAmelCase=1_6 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=2 , _UpperCAmelCase=3_2 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase=3_0 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=None , ) -> int:
__UpperCamelCase : List[str] = parent
__UpperCamelCase : str = batch_size
__UpperCamelCase : str = decoder_seq_length
# For common tests
__UpperCamelCase : Optional[int] = self.decoder_seq_length
__UpperCamelCase : Any = is_training
__UpperCamelCase : Tuple = use_attention_mask
__UpperCamelCase : Optional[int] = use_labels
__UpperCamelCase : Dict = vocab_size
__UpperCamelCase : Optional[int] = d_model
__UpperCamelCase : Union[str, Any] = d_model
__UpperCamelCase : int = decoder_layers
__UpperCamelCase : Dict = decoder_layers
__UpperCamelCase : str = decoder_ffn_dim
__UpperCamelCase : Optional[Any] = decoder_attention_heads
__UpperCamelCase : Optional[Any] = decoder_attention_heads
__UpperCamelCase : List[Any] = eos_token_id
__UpperCamelCase : int = bos_token_id
__UpperCamelCase : Tuple = pad_token_id
__UpperCamelCase : Tuple = decoder_start_token_id
__UpperCamelCase : Dict = use_cache
__UpperCamelCase : Optional[Any] = max_position_embeddings
__UpperCamelCase : int = None
__UpperCamelCase : Optional[int] = decoder_seq_length
__UpperCamelCase : Optional[int] = 2
__UpperCamelCase : Optional[int] = 1
def a_ (self ) -> List[Any]:
__UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
__UpperCamelCase : int = None
if self.use_attention_mask:
__UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
__UpperCamelCase : List[str] = None
if self.use_labels:
__UpperCamelCase : int = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
__UpperCamelCase : Optional[Any] = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> Optional[Any]:
__UpperCamelCase : List[Any] = True
__UpperCamelCase : Optional[Any] = TrOCRDecoder(config=_UpperCAmelCase ).to(_UpperCAmelCase ).eval()
__UpperCamelCase : Optional[Any] = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
__UpperCamelCase : str = model(_UpperCAmelCase , use_cache=_UpperCAmelCase )
__UpperCamelCase : List[Any] = model(_UpperCAmelCase )
__UpperCamelCase : Optional[int] = model(_UpperCAmelCase , use_cache=_UpperCAmelCase )
self.parent.assertTrue(len(_UpperCAmelCase ) == len(_UpperCAmelCase ) )
self.parent.assertTrue(len(_UpperCAmelCase ) == len(_UpperCAmelCase ) + 1 )
__UpperCamelCase : List[Any] = outputs["past_key_values"]
# create hypothetical next token and extent to next_input_ids
__UpperCamelCase : Optional[int] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
__UpperCamelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 )
__UpperCamelCase : Tuple = model(_UpperCAmelCase )["last_hidden_state"]
__UpperCamelCase : Any = model(_UpperCAmelCase , past_key_values=_UpperCAmelCase )["last_hidden_state"]
# select random slice
__UpperCamelCase : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__UpperCamelCase : Dict = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
__UpperCamelCase : Optional[int] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 )
def a_ (self ) -> Optional[Any]:
__UpperCamelCase : List[str] = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Any = config_and_inputs
__UpperCamelCase : str = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
A = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
A = (TrOCRForCausalLM,) if is_torch_available() else ()
A = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
A = True
A = False
def a_ (self ) -> List[str]:
__UpperCamelCase : Optional[int] = TrOCRStandaloneDecoderModelTester(self , is_training=_UpperCAmelCase )
__UpperCamelCase : Dict = ConfigTester(self , config_class=_UpperCAmelCase )
def a_ (self ) -> Dict:
pass
def a_ (self ) -> Optional[int]:
pass
def a_ (self ) -> Optional[Any]:
pass
def a_ (self ) -> Dict:
self.config_tester.run_common_tests()
def a_ (self ) -> List[Any]:
__UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*_UpperCAmelCase )
def a_ (self ) -> Any:
return
@unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :)
def a_ (self ) -> Tuple:
pass
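# A self-contained, hedged sketch of the cached-decoding equivalence the tester above
# checks: run the extended sequence once, then feed only the new token plus the cache,
# and compare the overlapping hidden states. Config values mirror the tester defaults.
import torch
from transformers import TrOCRConfig
from transformers.models.trocr.modeling_trocr import TrOCRDecoder

config = TrOCRConfig(
    vocab_size=99, d_model=32, decoder_layers=2, decoder_attention_heads=4,
    decoder_ffn_dim=4, use_cache=True,
)
model = TrOCRDecoder(config).eval()  # eval() disables dropout for determinism
input_ids = torch.randint(1, config.vocab_size, (2, 7))
past = model(input_ids, use_cache=True)["past_key_values"]
next_token = torch.randint(1, config.vocab_size, (2, 1))
full = model(torch.cat([input_ids, next_token], dim=-1))["last_hidden_state"]
step = model(next_token, past_key_values=past)["last_hidden_state"]
assert torch.allclose(full[:, -1], step[:, 0], atol=1e-3)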
| 298
| 0
|
import math
from numpy import inf
from scipy.integrate import quad
def gamma ( num : float ):
    if num <= 0:
        raise ValueError("math domain error" )
    # integrate x^(num - 1) * e^(-x) from 0 to infinity
    return quad(integrand , 0 , inf , args=(num,) )[0]
def integrand ( x : float , z : float ):
    return math.pow(x , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
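# Sanity check, as a quick hedged example: Γ(n) = (n - 1)! for positive integers,
# so gamma(5) should be numerically close to 4! = 24.
assert abs(gamma(5) - 24.0) < 1e-4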
| 59
|
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = '''Hello, World!'''
_lowerCAmelCase = '''en_XX'''
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
__UpperCamelCase : Union[str, Any] = Path("data_bin" )
__UpperCamelCase : Union[str, Any] = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(snake_case__ ).parent ) , checkpoint_file=Path(snake_case__ ).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(snake_case__ ) , bpe="sentencepiece" , sentencepiece_model=str(Path(snake_case__ ).parent / "sentencepiece.bpe.model" ) , src_dict=str(data_dir / "dict.txt" ) , )
xmod.eval() # disable dropout
print(snake_case__ )
__UpperCamelCase : List[str] = xmod.model.encoder.sentence_encoder
__UpperCamelCase : Optional[int] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
__UpperCamelCase : Any = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our X-MOD config:" , snake_case__ )
__UpperCamelCase : Dict = XmodForSequenceClassification(snake_case__ ) if classification_head else XmodForMaskedLM(snake_case__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
__UpperCamelCase : List[Any] = xmod_sent_encoder.embed_tokens.weight
__UpperCamelCase : List[Any] = xmod_sent_encoder.embed_positions.weight
__UpperCamelCase : str = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
__UpperCamelCase : Any = xmod_sent_encoder.layernorm_embedding.weight
__UpperCamelCase : str = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__UpperCamelCase : int = model.roberta.encoder.layer[i]
__UpperCamelCase : Any = xmod_sent_encoder.layers[i]
# self attention
__UpperCamelCase : List[str] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("Dimensions of self-attention weights do not match." )
__UpperCamelCase : Dict = xmod_layer.self_attn.q_proj.weight
__UpperCamelCase : Optional[Any] = xmod_layer.self_attn.q_proj.bias
__UpperCamelCase : Any = xmod_layer.self_attn.k_proj.weight
__UpperCamelCase : Tuple = xmod_layer.self_attn.k_proj.bias
__UpperCamelCase : Union[str, Any] = xmod_layer.self_attn.v_proj.weight
__UpperCamelCase : Any = xmod_layer.self_attn.v_proj.bias
# self-attention output
__UpperCamelCase : Optional[int] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("Dimensions of self-attention output weights do not match." )
__UpperCamelCase : Union[str, Any] = xmod_layer.self_attn.out_proj.weight
__UpperCamelCase : str = xmod_layer.self_attn.out_proj.bias
__UpperCamelCase : Dict = xmod_layer.self_attn_layer_norm.weight
__UpperCamelCase : Any = xmod_layer.self_attn_layer_norm.bias
# intermediate
__UpperCamelCase : Dict = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of intermediate weights do not match." )
__UpperCamelCase : List[Any] = xmod_layer.fca.weight
__UpperCamelCase : Optional[int] = xmod_layer.fca.bias
# output
__UpperCamelCase : List[Any] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of feed-forward weights do not match." )
__UpperCamelCase : Tuple = xmod_layer.fca.weight
__UpperCamelCase : int = xmod_layer.fca.bias
__UpperCamelCase : Dict = xmod_layer.final_layer_norm.weight
__UpperCamelCase : int = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
__UpperCamelCase : Any = xmod_layer.adapter_layer_norm.weight
__UpperCamelCase : int = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("Lists of language adapters do not match." )
for lang_code, adapter in xmod_layer.adapter_modules.items():
__UpperCamelCase : Any = bert_output.adapter_modules[lang_code]
__UpperCamelCase : Dict = xmod_layer.adapter_modules[lang_code]
__UpperCamelCase : int = from_adapter.fca.weight
__UpperCamelCase : Dict = from_adapter.fca.bias
__UpperCamelCase : List[Any] = from_adapter.fca.weight
__UpperCamelCase : int = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
__UpperCamelCase : Tuple = xmod_sent_encoder.layer_norm.weight
__UpperCamelCase : List[Any] = xmod_sent_encoder.layer_norm.bias
if classification_head:
__UpperCamelCase : Optional[Any] = xmod.model.classification_heads["mnli"].dense.weight
__UpperCamelCase : Any = xmod.model.classification_heads["mnli"].dense.bias
__UpperCamelCase : Tuple = xmod.model.classification_heads["mnli"].out_proj.weight
__UpperCamelCase : List[Any] = xmod.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
__UpperCamelCase : Any = xmod.model.encoder.lm_head.dense.weight
__UpperCamelCase : Optional[Any] = xmod.model.encoder.lm_head.dense.bias
__UpperCamelCase : Tuple = xmod.model.encoder.lm_head.layer_norm.weight
__UpperCamelCase : List[Any] = xmod.model.encoder.lm_head.layer_norm.bias
__UpperCamelCase : Tuple = xmod.model.encoder.lm_head.weight
__UpperCamelCase : Any = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
__UpperCamelCase : Any = xmod.encode(snake_case__ ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(snake_case__ )
__UpperCamelCase : Optional[Any] = model(snake_case__ )[0]
if classification_head:
__UpperCamelCase : int = xmod.model.classification_heads["mnli"](xmod.extract_features(snake_case__ ) )
else:
__UpperCamelCase : Optional[Any] = xmod.model(snake_case__ , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
__UpperCamelCase : Dict = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7
__UpperCamelCase : Union[str, Any] = torch.allclose(snake_case__ , snake_case__ , atol=1E-3 )
print("Do both models output the same tensors?" , "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
Path(snake_case__ ).mkdir(parents=snake_case__ , exist_ok=snake_case__ )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
_lowerCAmelCase = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
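# Example invocation, hedged: the script file name and the paths below are
# illustrative (in the transformers repo this logic lives in the X-MOD conversion
# script under src/transformers/models/xmod/).
# python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#     --xmod_checkpoint_path /path/to/xmod/model.pt \
#     --pytorch_dump_folder_path /path/to/output \
#     --classification_head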
| 298
| 0
|
"""simple docstring"""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
snake_case__ : List[Any] = logging.get_logger(__name__)
snake_case__ : Optional[int] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
snake_case__ : str = {
'''vocab_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json''',
},
'''merges_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''Salesforce/codegen-350M-mono''': (
'''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'''
),
},
}
snake_case__ : Union[str, Any] = {
'''Salesforce/codegen-350M-mono''': 2_048,
}
class snake_case_( a__ ):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ['''input_ids''', '''attention_mask''']
__UpperCamelCase = CodeGenTokenizer
def __init__( self : Optional[int] , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : Tuple="<|endoftext|>" , UpperCamelCase_ : int="<|endoftext|>" , UpperCamelCase_ : Tuple="<|endoftext|>" , UpperCamelCase_ : Optional[int]=False , **UpperCamelCase_ : Dict , ):
super().__init__(
UpperCamelCase_ , UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , unk_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , )
if kwargs.pop('''add_bos_token''' , UpperCamelCase_ ):
lowerCAmelCase : str = kwargs.pop('''name_or_path''' , '''''' )
raise ValueError(
'''Currenty GPT2\'s fast tokenizer does NOT support adding a BOS token.'''
'''Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n'''
F'''`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n'''
F'''`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n'''
'''This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.'''
''' so that the fast tokenizer works correctly.''' )
lowerCAmelCase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , UpperCamelCase_ ) != add_prefix_space:
lowerCAmelCase : List[str] = getattr(UpperCamelCase_ , pre_tok_state.pop('''type''' ) )
lowerCAmelCase : Optional[Any] = add_prefix_space
lowerCAmelCase : Tuple = pre_tok_class(**UpperCamelCase_ )
lowerCAmelCase : Optional[int] = add_prefix_space
def lowerCamelCase__ ( self : int , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : Any ):
lowerCAmelCase : Dict = kwargs.get('''is_split_into_words''' , UpperCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[int] , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : int ):
lowerCAmelCase : List[str] = kwargs.get('''is_split_into_words''' , UpperCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase__ ( self : int , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ):
lowerCAmelCase : Tuple = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"] , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = None , UpperCamelCase_ : Optional[List[str]] = None , **UpperCamelCase_ : Tuple , ):
lowerCAmelCase : List[str] = super().decode(
token_ids=UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ , **UpperCamelCase_ , )
if truncate_before_pattern is not None and len(UpperCamelCase_ ) > 0:
lowerCAmelCase : str = self.truncate(UpperCamelCase_ , UpperCamelCase_ )
return decoded_text
def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[Any] ):
def find_re(UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str] ):
lowerCAmelCase : Dict = pattern.search(UpperCamelCase_ , UpperCamelCase_ )
return m.start() if m else -1
lowerCAmelCase : Union[str, Any] = [re.compile(UpperCamelCase_ , re.MULTILINE ) for pattern in truncate_before_pattern]
lowerCAmelCase : Optional[Any] = list(re.finditer('''^print''' , UpperCamelCase_ , re.MULTILINE ) )
if len(UpperCamelCase_ ) > 1:
lowerCAmelCase : int = completion[: prints[1].start()]
lowerCAmelCase : List[str] = list(re.finditer('''^def''' , UpperCamelCase_ , re.MULTILINE ) )
if len(UpperCamelCase_ ) > 1:
lowerCAmelCase : Union[str, Any] = completion[: defs[1].start()]
lowerCAmelCase : Optional[Any] = 0
lowerCAmelCase : Tuple = [
pos for pos in [find_re(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) for terminal in terminals] if pos != -1
]
if len(UpperCamelCase_ ) > 0:
return completion[: min(UpperCamelCase_ )]
else:
return completion
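# Hedged usage sketch for the truncate-on-decode behaviour above: decoding cuts the
# completion at the earliest match of any pattern in truncate_before_pattern
# (checkpoint name as in the maps above; example text is illustrative).
from transformers import CodeGenTokenizerFast

tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
completion = "def add(a, b):\n    return a + b\n\n# unit test\nprint(add(1, 2))"
ids = tokenizer(completion).input_ids
print(tokenizer.decode(ids, truncate_before_pattern=[r"\n\n#"]))
# -> "def add(a, b):\n    return a + b"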
| 60
|
'''simple docstring'''
def __lowerCAmelCase ( txt ):
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt ) )
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__('''doctest''').testmod()
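# Expected behaviour, as a quick check of the fixed signature above: one variant per
# alphabetic position, each with that single character upper-cased.
assert __lowerCAmelCase("abc") == ["Abc", "aBc", "abC"]
assert __lowerCAmelCase("a1b") == ["A1b", "a1B"]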
| 298
| 0
|