"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        # Informer arguments
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
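
# A minimal usage sketch for the config above (assumes the `transformers`
# package, where this class ships as `InformerConfig`):
#
# from transformers import InformerConfig
#
# config = InformerConfig(prediction_length=24, num_time_features=2)
# print(config.context_length)  # 24: falls back to prediction_length when unset
# print(config.lags_sequence)   # [1, 2, 3, 4, 5, 6, 7] by default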
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
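
# Usage sketch (paths and the script filename are placeholders; the flags mirror
# the argparse definition above):
#
#   python convert_unispeech_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/unispeech_checkpoint.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./unispeech-converted
#
# Pass --not_finetuned when converting a pretraining-only checkpoint; otherwise
# the script builds a CTC model plus a processor from the fairseq dictionary.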
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
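
# A standalone sketch of the integration check above (downloads weights from the
# Hub, so it needs network access and the `transformers` package):
#
# import torch
# from transformers import DistilBertModel
#
# model = DistilBertModel.from_pretrained("distilbert-base-uncased")
# input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
# with torch.no_grad():
#     hidden = model(input_ids)[0]
# print(hidden.shape)  # torch.Size([1, 11, 768]): (batch, seq_len, dim)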
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Create a schedule with a constant learning rate, using the learning rate set in the optimizer."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant learning rate preceded by a linear warmup over `num_warmup_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise constant schedule, e.g. "1:10,0.1:20,0.01:30,0.005" (multiplier:until-step, ..., final)."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, value = rule_str.split(":")
        steps = int(value_str)
        lr_multiple = float(value)
        rules_dict[steps] = lr_multiple
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by a linear decay to 0 at `num_training_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by cosine decay."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup followed by cosine decay with several hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup followed by polynomial decay from the optimizer's initial lr to `lr_end`."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified API to get any scheduler from its name."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
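
# Usage sketch: any factory above plugs into a standard torch training loop.
# Standalone; builds a throwaway parameter so the optimizer has something to step.
#
# import torch
#
# params = [torch.nn.Parameter(torch.zeros(1))]
# optimizer = torch.optim.AdamW(params, lr=1e-3)
# lr_scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=10, num_training_steps=100)
# for _ in range(100):
#     optimizer.step()
#     lr_scheduler.step()
# print(optimizer.param_groups[0]["lr"])  # ~0.0 at the end of the cosine decay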
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@dataclass
class BitsAndBytesConfig:
    """
    Wrapper class for all the arguments that can be used when loading a model with `bitsandbytes`.
    """

    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")

        self.post_init()

    def post_init(self):
        """Safety checker that the arguments have valid types."""
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")

        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")

        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")

        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")

        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")

        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")

        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")

        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version"
            )

    def is_quantizable(self):
        """Returns True if the model is quantizable, False otherwise."""
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        """Returns the quantization method used, or None if the model is not quantizable."""
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)

        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path):
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        # torch dtypes serialize as e.g. "float32" rather than "torch.float32"
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        return output

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"

    def to_json_string(self, use_diff: bool = True) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self) -> Dict[str, Any]:
        config_dict = self.to_dict()

        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()

        serializable_config_dict = {}

        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value

        return serializable_config_dict
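
# Usage sketch (assumes a CUDA machine with bitsandbytes>=0.39.0 installed, as
# enforced by `post_init` above):
#
# import torch
#
# bnb_config = BitsAndBytesConfig(
#     load_in_4bit=True,
#     bnb_4bit_quant_type="nf4",
#     bnb_4bit_compute_dtype=torch.bfloat16,
#     bnb_4bit_use_double_quant=True,
# )
# print(bnb_config.quantization_method())  # "nf4"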
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applies a polynomial warmup, then hands off to `decay_schedule_fn`."""

    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: Optional[float] = None,
    adam_global_clipnorm: Optional[float] = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: Optional[List[str]] = None,
):
    """Creates an optimizer with a learning rate schedule using a warmup phase followed by a polynomial decay."""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay applied directly to the variables (not the gradients)."""

    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-7,
        amsgrad=False,
        weight_decay_rate=0.0,
        include_in_weight_decay=None,
        exclude_from_weight_decay=None,
        name="AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator(object):
    """Accumulates gradients across replicas until explicitly reset."""

    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
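
# Usage sketch for `create_optimizer` above (assumes TensorFlow is installed):
#
# optimizer, lr_schedule = create_optimizer(
#     init_lr=5e-5,
#     num_train_steps=1000,
#     num_warmup_steps=100,
#     weight_decay_rate=0.01,
# )
# # Halfway through warmup the rate is half the peak (power=1.0 by default):
# print(float(lr_schedule(50)))  # 2.5e-05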
"""simple docstring"""
from math import ceil
def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
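
# Worked example: 12 attention blocks spread over two devices. `get_device_map`
# fills devices in order; `assert_device_map` validates the result.
#
# device_map = get_device_map(n_layers=12, devices=[0, 1])
# print(device_map)  # {0: [0, 1, 2, 3, 4, 5], 1: [6, 7, 8, 9, 10, 11]}
# assert_device_map(device_map, num_blocks=12)  # raises on gaps, overlaps, or extras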
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_multiple_exact_duplicates(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
"""simple docstring"""
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    r"""
    A basic Transformer block: self-attention, optional cross-attention, and a feed-forward layer,
    each preceded by its own normalization.
    """

    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        timestep=None,
        cross_attention_kwargs=None,
        class_labels=None,
    ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )

            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
                )

            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states

        return hidden_states
class FeedForward(nn.Module):
    r"""
    A feed-forward layer with a configurable activation ("gelu", "gelu-approximate", "geglu",
    "geglu-approximate"), dropout, and an output projection.
    """

    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    r"""
    GELU activation function, with tanh approximation support via `approximate="tanh"`.
    """

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    r"""
    A variant of the gated linear unit activation function from https://arxiv.org/abs/2002.05202.
    """

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class ApproximateGELU(nn.Module):
    r"""
    The approximate form of the GELU: x * sigmoid(1.702 * x) (see https://arxiv.org/abs/1606.08415, section 2).
    """

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
class AdaLayerNorm(nn.Module):
    r"""
    Norm layer modified to incorporate timestep embeddings.
    """

    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class AdaLayerNormZero(nn.Module):
    r"""
    Norm layer with adaLN-zero modulation from combined timestep and class-label embeddings.
    """

    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class lowerCAmelCase_ (nn.Module ):
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 1E-5 ) -> int:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ : List[Any] = num_groups
SCREAMING_SNAKE_CASE__ : Any = eps
if act_fn is None:
SCREAMING_SNAKE_CASE__ : Any = None
else:
SCREAMING_SNAKE_CASE__ : Dict = get_activation(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = nn.Linear(SCREAMING_SNAKE_CASE__ , out_dim * 2 )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
if self.act:
SCREAMING_SNAKE_CASE__ : Tuple = self.act(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = self.linear(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = emb[:, :, None, None]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = emb.chunk(2 , dim=1 )
SCREAMING_SNAKE_CASE__ : Dict = F.group_norm(SCREAMING_SNAKE_CASE__ , self.num_groups , eps=self.eps )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = x * (1 + scale) + shift
return x
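# Illustration (not part of the original file): every Ada*Norm block above
# follows the same recipe -- normalize, then modulate with a learned
# per-sample shift and scale. A minimal standalone sketch of that modulation,
# with illustrative shapes only:
def _adaptive_modulation_sketch() -> torch.Tensor:
    x = torch.randn(2, 8)  # normalized hidden states of shape (batch, dim)
    shift = torch.randn(2, 8)  # in the real blocks, produced from a conditioning embedding
    scale = torch.randn(2, 8)  # in the real blocks, produced from a conditioning embedding
    return x * (1 + scale) + shift  # the same modulation the blocks above apply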
| 25 |
'''simple docstring'''
from math import ceil
def solution(n: int = 1001) -> int:
    """Return the sum of the numbers on the diagonals of an n x n number spiral."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1  # side length of the i-th ring; its top-right corner is odd**2
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even  # the four corners sum to 4*odd**2 - 12*i
    return total
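# Worked example: a 5 x 5 spiral carries 1, 3, 5, 7, 9, 13, 17, 21, 25 on its
# diagonals, which sum to 101 -- and indeed solution(5) == 101.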
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
        n = int(sys.argv[1])
        print(solution(n))
except ValueError:
print("Invalid entry - please enter a number")
| 120 | 0 |
'''simple docstring'''
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def _snake_case ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict=False ) -> List[str]:
try:
lowerCAmelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
lowerCAmelCase = default
else:
# KEY is set, convert it to True or False.
try:
lowerCAmelCase = strtobool(lowerCamelCase__ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'If set, {key} must be yes or no.' )
return _value
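# Behaviour sketch for the env-flag parser above, following distutils.util.strtobool:
#   KEY=yes/true/1 -> True, KEY=no/false/0 -> False, unset -> the supplied default.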
UpperCAmelCase = parse_flag_from_env('RUN_SLOW', default=False)
def _snake_case ( _SCREAMING_SNAKE_CASE : Optional[int] ) -> Dict:
return unittest.skip("""Test was skipped""" )(lowerCamelCase__ )
def _snake_case ( _SCREAMING_SNAKE_CASE : Dict ) -> str:
return unittest.skipUnless(_run_slow_tests , """test is slow""" )(lowerCamelCase__ )
def _snake_case ( _SCREAMING_SNAKE_CASE : Any ) -> Optional[int]:
return unittest.skipUnless(not torch.cuda.is_available() , """test requires only a CPU""" )(lowerCamelCase__ )
def _snake_case ( _SCREAMING_SNAKE_CASE : int ) -> Optional[int]:
return unittest.skipUnless(torch.cuda.is_available() , """test requires a GPU""" )(lowerCamelCase__ )
def _snake_case ( _SCREAMING_SNAKE_CASE : Optional[Any] ) -> int:
return unittest.skipUnless(is_xpu_available() , """test requires a XPU""" )(lowerCamelCase__ )
def _snake_case ( _SCREAMING_SNAKE_CASE : int ) -> Dict:
return unittest.skipUnless(is_mps_available() , """test requires a `mps` backend support in `torch`""" )(lowerCamelCase__ )
def _snake_case ( _SCREAMING_SNAKE_CASE : Union[str, Any] ) -> str:
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , """test requires the Hugging Face suite""" )(lowerCamelCase__ )
def _snake_case ( _SCREAMING_SNAKE_CASE : Dict ) -> Optional[int]:
return unittest.skipUnless(is_bnb_available() , """test requires the bitsandbytes library""" )(lowerCamelCase__ )
def _snake_case ( _SCREAMING_SNAKE_CASE : Any ) -> List[Any]:
return unittest.skipUnless(is_tpu_available() , """test requires TPU""" )(lowerCamelCase__ )
def _snake_case ( _SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[int]:
return unittest.skipUnless(torch.cuda.device_count() == 1 , """test requires a GPU""" )(lowerCamelCase__ )
def _snake_case ( _SCREAMING_SNAKE_CASE : int ) -> Optional[int]:
return unittest.skipUnless(torch.xpu.device_count() == 1 , """test requires a XPU""" )(lowerCamelCase__ )
def _snake_case ( _SCREAMING_SNAKE_CASE : Any ) -> str:
return unittest.skipUnless(torch.cuda.device_count() > 1 , """test requires multiple GPUs""" )(lowerCamelCase__ )
def _snake_case ( _SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[int]:
return unittest.skipUnless(torch.xpu.device_count() > 1 , """test requires multiple XPUs""" )(lowerCamelCase__ )
def _snake_case ( _SCREAMING_SNAKE_CASE : Dict ) -> Tuple:
return unittest.skipUnless(is_safetensors_available() , """test requires safetensors""" )(lowerCamelCase__ )
def _snake_case ( _SCREAMING_SNAKE_CASE : List[str] ) -> List[str]:
return unittest.skipUnless(is_deepspeed_available() , """test requires DeepSpeed""" )(lowerCamelCase__ )
def _snake_case ( _SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int:
return unittest.skipUnless(is_torch_version(""">=""" , """1.12.0""" ) , """test requires torch version >= 1.12.0""" )(lowerCamelCase__ )
def _snake_case ( _SCREAMING_SNAKE_CASE : List[Any]=None , _SCREAMING_SNAKE_CASE : int=None ) -> Optional[Any]:
if test_case is None:
return partial(lowerCamelCase__ , version=lowerCamelCase__ )
return unittest.skipUnless(is_torch_version(""">=""" , lowerCamelCase__ ) , f'test requires torch version >= {version}' )(lowerCamelCase__ )
def _snake_case ( _SCREAMING_SNAKE_CASE : List[str] ) -> Optional[int]:
return unittest.skipUnless(is_tensorboard_available() , """test requires Tensorboard""" )(lowerCamelCase__ )
def _snake_case ( _SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple:
return unittest.skipUnless(is_wandb_available() , """test requires wandb""" )(lowerCamelCase__ )
def _snake_case ( _SCREAMING_SNAKE_CASE : Optional[int] ) -> List[str]:
return unittest.skipUnless(is_comet_ml_available() , """test requires comet_ml""" )(lowerCamelCase__ )
UpperCAmelCase = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def _snake_case ( _SCREAMING_SNAKE_CASE : List[Any] ) -> Union[str, Any]:
return unittest.skipUnless(
_atleast_one_tracker_available , """test requires at least one tracker to be available and for `comet_ml` to not be installed""" , )(lowerCamelCase__ )
class __snake_case( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : int = True
@classmethod
def __snake_case ( cls ) -> str:
lowerCAmelCase = tempfile.mkdtemp()
@classmethod
def __snake_case ( cls ) -> List[Any]:
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __snake_case ( self ) -> str:
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("""**/*""" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(_lowerCamelCase )
class __snake_case( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ) -> Any:
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class __snake_case( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self , A_ ) -> Optional[int]:
lowerCAmelCase = mocks if isinstance(_lowerCamelCase , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def _snake_case ( _SCREAMING_SNAKE_CASE : Optional[Any] ) -> Dict:
lowerCAmelCase = AcceleratorState()
lowerCAmelCase = tensor[None].clone().to(state.device )
lowerCAmelCase = gather(lowerCamelCase__ ).cpu()
lowerCAmelCase = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , lowerCamelCase__ ):
return False
return True
class __snake_case:
'''simple docstring'''
def __init__( self , A_ , A_ , A_ ) -> Any:
lowerCAmelCase = returncode
lowerCAmelCase = stdout
lowerCAmelCase = stderr
async def _snake_case ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[Any]:
while True:
lowerCAmelCase = await stream.readline()
if line:
callback(lowerCamelCase__ )
else:
break
async def _snake_case ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any]=None , _SCREAMING_SNAKE_CASE : Optional[int]=None , _SCREAMING_SNAKE_CASE : Optional[Any]=None , _SCREAMING_SNAKE_CASE : int=False , _SCREAMING_SNAKE_CASE : str=False ) -> _RunOutput:
if echo:
print("""\nRunning: """ , """ """.join(lowerCamelCase__ ) )
lowerCAmelCase = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=lowerCamelCase__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=lowerCamelCase__ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
lowerCAmelCase = []
lowerCAmelCase = []
def tee(_SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Union[str, Any]="" ):
lowerCAmelCase = line.decode("""utf-8""" ).rstrip()
sink.append(lowerCamelCase__ )
if not quiet:
print(lowerCamelCase__ , lowerCamelCase__ , file=lowerCamelCase__ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda _SCREAMING_SNAKE_CASE : tee(lowerCamelCase__ , lowerCamelCase__ , sys.stdout , label="""stdout:""" ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda _SCREAMING_SNAKE_CASE : tee(lowerCamelCase__ , lowerCamelCase__ , sys.stderr , label="""stderr:""" ) ) ),
] , timeout=lowerCamelCase__ , )
return _RunOutput(await p.wait() , lowerCamelCase__ , lowerCamelCase__ )
def _snake_case ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : Optional[int]=None , _SCREAMING_SNAKE_CASE : str=180 , _SCREAMING_SNAKE_CASE : Union[str, Any]=False , _SCREAMING_SNAKE_CASE : List[str]=True ) -> _RunOutput:
lowerCAmelCase = asyncio.get_event_loop()
lowerCAmelCase = loop.run_until_complete(
_stream_subprocess(lowerCamelCase__ , env=lowerCamelCase__ , stdin=lowerCamelCase__ , timeout=lowerCamelCase__ , quiet=lowerCamelCase__ , echo=lowerCamelCase__ ) )
lowerCAmelCase = ''' '''.join(lowerCamelCase__ )
if result.returncode > 0:
lowerCAmelCase = '''\n'''.join(result.stderr )
raise RuntimeError(
f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
f'The combined stderr from workers follows:\n{stderr}' )
return result
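# Behaviour sketch for the synchronous runner above (execute_subprocess_async in
# accelerate's testing utilities): given a hypothetical command such as
# ["python", "--version"], it blocks on the event loop, returns a _RunOutput,
# and raises RuntimeError with the captured stderr on a non-zero return code.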
class __snake_case( Exception ):
'''simple docstring'''
pass
def _snake_case ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[Any]=False ) -> int:
try:
lowerCAmelCase = subprocess.check_output(lowerCamelCase__ , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(lowerCamelCase__ , """decode""" ):
lowerCAmelCase = output.decode("""utf-8""" )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f'Command `{" ".join(lowerCamelCase__ )}` failed with the following error:\n\n{e.output.decode()}' ) from e
| 354 |
'''simple docstring'''
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process using shortest remaining time first."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    complete = 0
    increment_time = 0
    minm = 999_999_999
    short = 0
    check = False
    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True
        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1
        minm = remaining_time[short]
        if minm == 0:
            minm = 999_999_999
        if remaining_time[short] == 0:
            complete += 1
            check = False
            # Find finish time of current process
            finish_time = increment_time + 1
            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]
            if waiting_time[short] < 0:
                waiting_time[short] = 0
        # Increment time
        increment_time += 1
    return waiting_time
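# Worked example: arrival_time=[0, 1, 2] and burst_time=[4, 2, 1] give waiting
# times [3, 0, 1]: process 1 preempts process 0 at t=1, process 2 runs once
# process 1 finishes, and process 0 resumes last, finishing at t=7.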
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Calculate the turnaround time of each process: burst time plus waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the average waiting time and the average turnaround time."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f'Average waiting time = {total_waiting_time / no_of_processes:.5f}')
    print("Average turn around time =", total_turn_around_time / no_of_processes)
if __name__ == "__main__":
print('Enter how many process you want to analyze')
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))
    for i in range(no_of_processes):
        print('Enter the arrival time and burst time for process:--' + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)
    calculate_average_times(waiting_time, turn_around_time, no_of_processes)
    fcfs = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'Process',
'BurstTime',
'ArrivalTime',
'WaitingTime',
'TurnAroundTime',
],
)
# Printing the dataFrame
pd.set_option('display.max_rows', fcfs.shape[0] + 1)
print(fcfs)
| 187 | 0 |
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bs4_available():
from transformers import MarkupLMFeatureExtractor
class snake_case__( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __lowercase ) -> Optional[Any]:
lowerCAmelCase_ : Optional[Any] = parent
def lowercase_ ( self ) -> str:
return {}
def lowerCAmelCase ( )-> List[str]:
lowerCAmelCase_ : Any = '''<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>'''
lowerCAmelCase_ : Optional[int] = '''
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
'''
return [html_string_a, html_string_a]
@require_bs4
class snake_case__( FeatureExtractionSavingTestMixin, unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = MarkupLMFeatureExtractor if is_bs4_available() else None
def lowercase_ ( self ) -> int:
lowerCAmelCase_ : List[str] = MarkupLMFeatureExtractionTester(self )
@property
def lowercase_ ( self ) -> Tuple:
return self.feature_extract_tester.prepare_feat_extract_dict()
def lowercase_ ( self ) -> List[str]:
# Initialize feature_extractor
lowerCAmelCase_ : Tuple = self.feature_extraction_class()
# Test not batched input
lowerCAmelCase_ : Optional[int] = get_html_strings()[0]
lowerCAmelCase_ : List[Any] = feature_extractor(__lowercase )
# fmt: off
lowerCAmelCase_ : List[str] = [['''sample document''', '''Goog''', '''This is one header''', '''This is a another Header''', '''Travel from''', '''SFO to JFK''', '''on May 2, 2015 at 2:00 pm. For details go to confirm.com''', '''Traveler''', '''name''', '''is''', '''John Doe''']]
lowerCAmelCase_ : str = [['''/html/head/title''', '''/html/body/a''', '''/html/body/h1''', '''/html/body/h2''', '''/html/body/p''', '''/html/body/p/p/b[1]''', '''/html/body/p/p/b[2]/i''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/b''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/p''']]
# fmt: on
self.assertEqual(encoding.nodes , __lowercase )
self.assertEqual(encoding.xpaths , __lowercase )
# Test batched
lowerCAmelCase_ : str = get_html_strings()
lowerCAmelCase_ : int = feature_extractor(__lowercase )
# fmt: off
lowerCAmelCase_ : Optional[Any] = expected_nodes + [['''My First Heading''', '''My first paragraph.''']]
lowerCAmelCase_ : List[str] = expected_xpaths + [['''/html/body/h1''', '''/html/body/p''']]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , __lowercase )
self.assertEqual(encoding.xpaths , __lowercase )
| 262 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_UpperCAmelCase : Union[str, Any] ="""pt"""
elif is_tf_available():
_UpperCAmelCase : List[Any] ="""tf"""
else:
_UpperCAmelCase : Optional[int] ="""jax"""
class snake_case__( TokenizerTesterMixin, unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = PerceiverTokenizer
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
def lowercase_ ( self ) -> Optional[int]:
super().setUp()
lowerCAmelCase_ : str = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase_ ( self ) -> Any:
return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' )
def lowercase_ ( self , **__lowercase ) -> PerceiverTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowercase )
def lowercase_ ( self , __lowercase , __lowercase=False , __lowercase=2_0 , __lowercase=5 ) -> Tuple[str, list]:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
lowerCAmelCase_ : Optional[Any] = []
for i in range(len(__lowercase ) ):
try:
lowerCAmelCase_ : List[str] = tokenizer.decode([i] , clean_up_tokenization_spaces=__lowercase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCAmelCase_ : List[str] = list(filter(lambda __lowercase : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , __lowercase ) )
lowerCAmelCase_ : Optional[int] = list(filter(lambda __lowercase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__lowercase ) , __lowercase ) )
if max_length is not None and len(__lowercase ) > max_length:
lowerCAmelCase_ : Union[str, Any] = toks[:max_length]
if min_length is not None and len(__lowercase ) < min_length and len(__lowercase ) > 0:
while len(__lowercase ) < min_length:
lowerCAmelCase_ : Union[str, Any] = toks + toks
# toks_str = [t[1] for t in toks]
lowerCAmelCase_ : List[str] = [t[0] for t in toks]
# Ensure consistency
lowerCAmelCase_ : int = tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase )
if " " not in output_txt and len(__lowercase ) > 1:
lowerCAmelCase_ : Optional[int] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__lowercase )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__lowercase )
)
if with_prefix_space:
lowerCAmelCase_ : Any = ''' ''' + output_txt
lowerCAmelCase_ : List[str] = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
return output_txt, output_ids
def lowercase_ ( self ) -> Union[str, Any]:
lowerCAmelCase_ : List[str] = self.perceiver_tokenizer
lowerCAmelCase_ : Any = '''Unicode €.'''
lowerCAmelCase_ : Dict = tokenizer(__lowercase )
lowerCAmelCase_ : Any = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
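        # Perceiver tokenizes raw UTF-8 bytes offset by 6 special-token ids, so
        # "U" (byte 85) becomes id 91, and [CLS]/[SEP] are ids 4 and 5.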
self.assertEqual(encoded['''input_ids'''] , __lowercase )
# decoding
lowerCAmelCase_ : str = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , '''[CLS]Unicode €.[SEP]''' )
lowerCAmelCase_ : Optional[int] = tokenizer('''e è é ê ë''' )
lowerCAmelCase_ : str = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded['''input_ids'''] , __lowercase )
# decoding
lowerCAmelCase_ : int = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , '''[CLS]e è é ê ë[SEP]''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' )
def lowercase_ ( self ) -> List[str]:
lowerCAmelCase_ : Any = self.perceiver_tokenizer
lowerCAmelCase_ : Dict = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
lowerCAmelCase_ : Optional[int] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
lowerCAmelCase_ : Optional[int] = tokenizer(__lowercase , padding=__lowercase , return_tensors=__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
if FRAMEWORK != "jax":
lowerCAmelCase_ : str = list(batch.input_ids.numpy()[0] )
else:
lowerCAmelCase_ : Union[str, Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(__lowercase , __lowercase )
self.assertEqual((2, 3_8) , batch.input_ids.shape )
self.assertEqual((2, 3_8) , batch.attention_mask.shape )
def lowercase_ ( self ) -> Union[str, Any]:
lowerCAmelCase_ : int = self.perceiver_tokenizer
lowerCAmelCase_ : Optional[Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
lowerCAmelCase_ : List[Any] = tokenizer(__lowercase , padding=__lowercase , return_tensors=__lowercase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , __lowercase )
self.assertIn('''attention_mask''' , __lowercase )
self.assertNotIn('''decoder_input_ids''' , __lowercase )
self.assertNotIn('''decoder_attention_mask''' , __lowercase )
def lowercase_ ( self ) -> List[Any]:
lowerCAmelCase_ : Optional[Any] = self.perceiver_tokenizer
lowerCAmelCase_ : int = [
'''Summary of the text.''',
'''Another summary.''',
]
lowerCAmelCase_ : List[str] = tokenizer(
text_target=__lowercase , max_length=3_2 , padding='''max_length''' , truncation=__lowercase , return_tensors=__lowercase )
self.assertEqual(3_2 , targets['''input_ids'''].shape[1] )
def lowercase_ ( self ) -> Optional[Any]:
# safety check on max_len default value so we are sure the test works
lowerCAmelCase_ : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 4_2 )
# Now let's start the test
lowerCAmelCase_ : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase_ : Union[str, Any] = tempfile.mkdtemp()
lowerCAmelCase_ : str = ''' He is very happy, UNwant\u00E9d,running'''
lowerCAmelCase_ : Optional[int] = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
tokenizer.save_pretrained(__lowercase )
lowerCAmelCase_ : Any = tokenizer.__class__.from_pretrained(__lowercase )
lowerCAmelCase_ : Tuple = after_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
shutil.rmtree(__lowercase )
lowerCAmelCase_ : Optional[int] = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase_ : Optional[int] = tempfile.mkdtemp()
lowerCAmelCase_ : List[str] = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
lowerCAmelCase_ : Any = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
lowerCAmelCase_ : str = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
tokenizer.save_pretrained(__lowercase )
lowerCAmelCase_ : str = tokenizer.__class__.from_pretrained(__lowercase )
lowerCAmelCase_ : Optional[Any] = after_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 4_2 )
lowerCAmelCase_ : str = tokenizer.__class__.from_pretrained(__lowercase , model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length , 4_3 )
shutil.rmtree(__lowercase )
def lowercase_ ( self ) -> List[str]:
lowerCAmelCase_ : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowercase )
with open(os.path.join(__lowercase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
lowerCAmelCase_ : Tuple = json.load(__lowercase )
with open(os.path.join(__lowercase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
lowerCAmelCase_ : Any = json.load(__lowercase )
lowerCAmelCase_ : Optional[int] = [f"""<extra_id_{i}>""" for i in range(1_2_5 )]
lowerCAmelCase_ : Optional[Any] = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
lowerCAmelCase_ : Any = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(__lowercase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(__lowercase , __lowercase )
with open(os.path.join(__lowercase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(__lowercase , __lowercase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCAmelCase_ : int = tokenizer_class.from_pretrained(
__lowercase , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCAmelCase_ : Tuple = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=__lowercase )]
lowerCAmelCase_ : Dict = tokenizer_class.from_pretrained(
__lowercase , additional_special_tokens=__lowercase , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
def lowercase_ ( self ) -> Dict:
lowerCAmelCase_ : Any = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8] ) , '''�''' )
def lowercase_ ( self ) -> Tuple:
pass
def lowercase_ ( self ) -> Any:
pass
def lowercase_ ( self ) -> Tuple:
pass
def lowercase_ ( self ) -> List[str]:
pass
def lowercase_ ( self ) -> Dict:
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
lowerCAmelCase_ : Tuple = self.get_tokenizers(fast=__lowercase , do_lower_case=__lowercase )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowerCAmelCase_ : List[str] = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]''']
lowerCAmelCase_ : Optional[int] = tokenizer.convert_tokens_to_string(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
| 262 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase__ = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
lowerCamelCase__ = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase__ = {F"funnel-transformer/{name}": 512 for name in _model_names}
lowerCamelCase__ = {F"funnel-transformer/{name}": {'''do_lower_case''': True} for name in _model_names}
class _UpperCAmelCase ( PreTrainedTokenizerFast ):
'''simple docstring'''
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = PRETRAINED_INIT_CONFIGURATION
__A = FunnelTokenizer
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A = 2
def __init__( self : Tuple , lowercase_ : Any=None , lowercase_ : List[Any]=None , lowercase_ : List[str]=True , lowercase_ : List[str]="<unk>" , lowercase_ : List[Any]="<sep>" , lowercase_ : int="<pad>" , lowercase_ : Dict="<cls>" , lowercase_ : int="<mask>" , lowercase_ : Any="<s>" , lowercase_ : Tuple="</s>" , lowercase_ : List[str]=True , lowercase_ : Any=True , lowercase_ : str=None , lowercase_ : Dict="##" , **lowercase_ : Optional[int] , ) -> Dict:
"""simple docstring"""
super().__init__(
lowercase_ , tokenizer_file=lowercase_ , do_lower_case=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , clean_text=lowercase_ , tokenize_chinese_chars=lowercase_ , strip_accents=lowercase_ , wordpieces_prefix=lowercase_ , **lowercase_ , )
_UpperCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get("lowercase" , lowercase_) != do_lower_case
or normalizer_state.get("strip_accents" , lowercase_) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowercase_) != tokenize_chinese_chars
):
_UpperCamelCase = getattr(lowercase_ , normalizer_state.pop("type"))
_UpperCamelCase = do_lower_case
_UpperCamelCase = strip_accents
_UpperCamelCase = tokenize_chinese_chars
_UpperCamelCase = normalizer_class(**lowercase_)
_UpperCamelCase = do_lower_case
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any]=None) -> str:
"""simple docstring"""
_UpperCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCAmelCase ( self : str , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCamelCase = [self.sep_token_id]
_UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls) * [self.cls_token_type_id] + len(token_ids_a + sep) * [0]
return len(cls) * [self.cls_token_type_id] + len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
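    # Example (hypothetical ids): for a first sequence [5, 6] and a second [7],
    # the result is [2, 0, 0, 0, 1, 1] -- [CLS] gets the dedicated type id 2,
    # the first segment (plus [SEP]) gets 0, and the second (plus [SEP]) gets 1.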
def __UpperCAmelCase ( self : Dict , lowercase_ : str , lowercase_ : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
_UpperCamelCase = self._tokenizer.model.save(lowercase_ , name=lowercase_)
return tuple(lowercase_)
| 352 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
lowerCamelCase__ = {'''configuration_gpt_neox''': ['''GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXConfig''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ['''GPTNeoXTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXForCausalLM''',
'''GPTNeoXForQuestionAnswering''',
'''GPTNeoXForSequenceClassification''',
'''GPTNeoXForTokenClassification''',
'''GPTNeoXLayer''',
'''GPTNeoXModel''',
'''GPTNeoXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 63 | 0 |
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """Return the largest eigenvalue of input_matrix and its eigenvector."""
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find Rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector
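# Quick usage sketch: for the diagonal matrix [[2, 0], [0, 1]] and start vector
# [1, 1], power_iteration converges to eigenvalue 2.0 and eigenvector ~[1, 0].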
def test_power_iteration() -> None:
    """Check power_iteration against numpy's eigh on a real and a complex Hermitian matrix."""
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or Hermitian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 205 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __lowerCAmelCase ( ModelMixin , ConfigMixin , ModuleUtilsMixin ):
@register_to_config
def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = False , ) -> Tuple:
'''simple docstring'''
super().__init__()
_lowercase =nn.Embedding(lowerCAmelCase , lowerCAmelCase )
_lowercase =nn.Embedding(lowerCAmelCase , lowerCAmelCase )
_lowercase =False
_lowercase =nn.Dropout(p=lowerCAmelCase )
_lowercase =T5Config(
vocab_size=lowerCAmelCase , d_model=lowerCAmelCase , num_heads=lowerCAmelCase , d_kv=lowerCAmelCase , d_ff=lowerCAmelCase , dropout_rate=lowerCAmelCase , feed_forward_proj=lowerCAmelCase , is_decoder=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , )
_lowercase =nn.ModuleList()
for lyr_num in range(lowerCAmelCase ):
_lowercase =T5Block(lowerCAmelCase )
self.encoders.append(lowerCAmelCase )
_lowercase =T5LayerNorm(lowerCAmelCase )
_lowercase =nn.Dropout(p=lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase ) -> Dict:
'''simple docstring'''
_lowercase =self.token_embedder(lowerCAmelCase )
_lowercase =encoder_input_tokens.shape[1]
_lowercase =torch.arange(lowerCAmelCase , device=encoder_input_tokens.device )
x += self.position_encoding(lowerCAmelCase )
_lowercase =self.dropout_pre(lowerCAmelCase )
# inverted the attention mask
_lowercase =encoder_input_tokens.size()
_lowercase =self.get_extended_attention_mask(lowerCAmelCase , lowerCAmelCase )
for lyr in self.encoders:
_lowercase =lyr(lowerCAmelCase , lowerCAmelCase )[0]
_lowercase =self.layer_norm(lowerCAmelCase )
return self.dropout_post(lowerCAmelCase ), encoder_inputs_mask
| 205 | 1 |
def solution() -> int:
    """Count the Sundays that fell on the first of the month during the twentieth century."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6  # 6 January 1901 was the first Sunday of the century
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays
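# Sanity check: solution() returns 171, the published answer to Project Euler 19.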
if __name__ == "__main__":
print(solution())
| 134 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_snake_case : List[Any] = logging.get_logger(__name__)
class a (SequenceFeatureExtractor ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = ["input_features", "is_longer"]
def __init__( self : Optional[int] , lowerCamelCase : Any=64 , lowerCamelCase : Dict=48000 , lowerCamelCase : Dict=480 , lowerCamelCase : Tuple=10 , lowerCamelCase : Optional[int]=1024 , lowerCamelCase : int=0.0 , lowerCamelCase : Any=False , lowerCamelCase : float = 0 , lowerCamelCase : float = 14000 , lowerCamelCase : int = None , lowerCamelCase : str = "fusion" , lowerCamelCase : str = "repeatpad" , **lowerCamelCase : Optional[int] , ) -> Dict:
super().__init__(
feature_size=lowerCamelCase , sampling_rate=lowerCamelCase , padding_value=lowerCamelCase , return_attention_mask=lowerCamelCase , **lowerCamelCase , )
__snake_case : Optional[Any] = top_db
__snake_case : Dict = truncation
__snake_case : Dict = padding
__snake_case : Optional[Any] = fft_window_size
__snake_case : Optional[Any] = (fft_window_size >> 1) + 1
__snake_case : Dict = hop_length
__snake_case : Optional[int] = max_length_s
__snake_case : Optional[int] = max_length_s * sampling_rate
__snake_case : Dict = sampling_rate
__snake_case : Optional[int] = frequency_min
__snake_case : Any = frequency_max
__snake_case : Optional[int] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCamelCase , min_frequency=lowerCamelCase , max_frequency=lowerCamelCase , sampling_rate=lowerCamelCase , norm=lowerCamelCase , mel_scale="htk" , )
__snake_case : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCamelCase , min_frequency=lowerCamelCase , max_frequency=lowerCamelCase , sampling_rate=lowerCamelCase , norm="slaney" , mel_scale="slaney" , )
def __snake_case ( self : str ) -> Dict[str, Any]:
__snake_case : List[str] = copy.deepcopy(self.__dict__ )
__snake_case : List[Any] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __snake_case ( self : List[Any] , lowerCamelCase : np.array , lowerCamelCase : Optional[np.array] = None ) -> np.ndarray:
__snake_case : List[Any] = spectrogram(
lowerCamelCase , window_function(self.fft_window_size , "hann" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=lowerCamelCase , log_mel="dB" , )
return log_mel_spectrogram.T
def __snake_case ( self : List[Any] , lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : Any ) -> str:
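        # CLAP's "fusion" input for long audio: split the valid chunk-start range
        # into three thirds, sample one chunk from the front, middle and back of
        # the mel spectrogram, bilinearly shrink the full mel to chunk size, and
        # stack the four views as channels.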
__snake_case : Tuple = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
__snake_case : Tuple = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
__snake_case : Tuple = [0]
# randomly choose index for each part
__snake_case : List[Any] = np.random.choice(ranges[0] )
__snake_case : int = np.random.choice(ranges[1] )
__snake_case : List[str] = np.random.choice(ranges[2] )
__snake_case : Dict = mel[idx_front : idx_front + chunk_frames, :]
__snake_case : Optional[Any] = mel[idx_middle : idx_middle + chunk_frames, :]
__snake_case : Tuple = mel[idx_back : idx_back + chunk_frames, :]
__snake_case : Optional[Any] = torch.tensor(mel[None, None, :] )
__snake_case : Optional[int] = torch.nn.functional.interpolate(
lowerCamelCase , size=[chunk_frames, 64] , mode="bilinear" , align_corners=lowerCamelCase )
__snake_case : List[Any] = mel_shrink[0][0].numpy()
__snake_case : Union[str, Any] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __snake_case ( self : Any , lowerCamelCase : np.array , lowerCamelCase : str , lowerCamelCase : Tuple , lowerCamelCase : Dict ) -> np.array:
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
__snake_case : List[Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
__snake_case : Tuple = len(lowerCamelCase ) - max_length
__snake_case : List[Any] = np.random.randint(0 , overflow + 1 )
__snake_case : Dict = waveform[idx : idx + max_length]
__snake_case : Optional[int] = self._np_extract_fbank_features(lowerCamelCase , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
__snake_case : Any = self._np_extract_fbank_features(lowerCamelCase , self.mel_filters )
__snake_case : Optional[int] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
__snake_case : str = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
__snake_case : str = np.stack([mel, mel, mel, mel] , axis=0 )
__snake_case : Optional[Any] = False
else:
__snake_case : Any = self._random_mel_fusion(lowerCamelCase , lowerCamelCase , lowerCamelCase )
__snake_case : Tuple = True
else:
raise NotImplementedError(F'data_truncating {truncation} not implemented' )
else:
__snake_case : Dict = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
__snake_case : List[str] = int(max_length / len(lowerCamelCase ) )
__snake_case : Any = np.stack(np.tile(lowerCamelCase , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
__snake_case : str = int(max_length / len(lowerCamelCase ) )
__snake_case : List[str] = np.stack(np.tile(lowerCamelCase , lowerCamelCase ) )
__snake_case : str = np.pad(lowerCamelCase , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0 )
if truncation == "fusion":
__snake_case : List[str] = self._np_extract_fbank_features(lowerCamelCase , self.mel_filters )
__snake_case : List[str] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
__snake_case : Optional[int] = self._np_extract_fbank_features(lowerCamelCase , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : List[str] , lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCamelCase : str = None , lowerCamelCase : Optional[str] = None , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[Union[str, TensorType]] = None , **lowerCamelCase : Any , ) -> BatchFeature:
__snake_case : Union[str, Any] = truncation if truncation is not None else self.truncation
__snake_case : int = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
F' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
__snake_case : Any = isinstance(lowerCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
__snake_case : str = is_batched_numpy or (
isinstance(lowerCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__snake_case : Tuple = [np.asarray(speech , dtype=np.float64 ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase , np.ndarray ):
__snake_case : str = np.asarray(lowerCamelCase , dtype=np.float64 )
elif isinstance(lowerCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
__snake_case : int = raw_speech.astype(np.float64 )
# always return batch
if not is_batched:
__snake_case : Union[str, Any] = [np.asarray(lowerCamelCase )]
# convert to mel spectrogram, truncate and pad if needed.
__snake_case : Optional[int] = [
self._get_input_mel(lowerCamelCase , max_length if max_length else self.nb_max_samples , lowerCamelCase , lowerCamelCase )
for waveform in raw_speech
]
__snake_case : Optional[int] = []
__snake_case : Any = []
for mel, longer in padded_inputs:
input_mel.append(lowerCamelCase )
is_longer.append(lowerCamelCase )
if truncation == "fusion" and sum(lowerCamelCase ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
__snake_case : Optional[Any] = np.random.randint(0 , len(lowerCamelCase ) )
__snake_case : Union[str, Any] = True
if isinstance(input_mel[0] , lowerCamelCase ):
__snake_case : List[str] = [np.asarray(feature , dtype=np.float64 ) for feature in input_mel]
# is_longer is a list of bool
__snake_case : Any = [[longer] for longer in is_longer]
__snake_case : Tuple = {"input_features": input_mel, "is_longer": is_longer}
__snake_case : List[str] = BatchFeature(lowerCamelCase )
if return_tensors is not None:
__snake_case : Any = input_features.convert_to_tensors(lowerCamelCase )
return input_features
| 134 | 1 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
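    # Benchmark results are keyed as results[model]["result"][batch_size][sequence_length];
    # the helper below walks every (batch size, sequence length) pair and asserts that a
    # measurement was recorded.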
def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : str) ->Optional[Any]:
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss''']):
A__ = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Tuple:
'''simple docstring'''
A__ = '''sshleifer/tiny-gpt2'''
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , )
A__ = PyTorchBenchmark(UpperCAmelCase__)
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[Any]:
'''simple docstring'''
A__ = '''sgugger/tiny-distilbert-classification'''
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , only_pretrain_model=UpperCAmelCase__ , )
A__ = PyTorchBenchmark(UpperCAmelCase__)
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]:
'''simple docstring'''
A__ = '''sshleifer/tiny-gpt2'''
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , torchscript=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , )
A__ = PyTorchBenchmark(UpperCAmelCase__)
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''')
def SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]:
'''simple docstring'''
A__ = '''sshleifer/tiny-gpt2'''
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , fp16=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , )
A__ = PyTorchBenchmark(UpperCAmelCase__)
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def SCREAMING_SNAKE_CASE ( self : Any) ->int:
'''simple docstring'''
A__ = '''sshleifer/tiny-gpt2'''
A__ = AutoConfig.from_pretrained(UpperCAmelCase__)
# set architectures equal to `None`
A__ = None
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , )
A__ = PyTorchBenchmark(UpperCAmelCase__ , configs=[config])
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[int]:
'''simple docstring'''
A__ = '''sshleifer/tiny-gpt2'''
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , )
A__ = PyTorchBenchmark(UpperCAmelCase__)
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''')
def SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
'''simple docstring'''
A__ = '''sshleifer/tiny-gpt2'''
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , fp16=UpperCAmelCase__ , multi_process=UpperCAmelCase__ , )
A__ = PyTorchBenchmark(UpperCAmelCase__)
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def SCREAMING_SNAKE_CASE ( self : str) ->Tuple:
'''simple docstring'''
A__ = '''sshleifer/tiny-gpt2'''
A__ = AutoConfig.from_pretrained(UpperCAmelCase__)
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , )
A__ = PyTorchBenchmark(UpperCAmelCase__ , configs=[config])
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[Any]:
'''simple docstring'''
A__ = '''sshleifer/tinier_bart'''
A__ = AutoConfig.from_pretrained(UpperCAmelCase__)
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , )
A__ = PyTorchBenchmark(UpperCAmelCase__ , configs=[config])
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def SCREAMING_SNAKE_CASE ( self : str) ->Any:
'''simple docstring'''
A__ = '''sshleifer/tiny-gpt2'''
A__ = AutoConfig.from_pretrained(UpperCAmelCase__)
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , )
A__ = PyTorchBenchmark(UpperCAmelCase__ , configs=[config])
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def SCREAMING_SNAKE_CASE ( self : int) ->Tuple:
'''simple docstring'''
A__ = '''sshleifer/tinier_bart'''
A__ = AutoConfig.from_pretrained(UpperCAmelCase__)
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , )
A__ = PyTorchBenchmark(UpperCAmelCase__ , configs=[config])
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[Any]:
'''simple docstring'''
A__ = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , save_to_csv=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(UpperCAmelCase__ , '''inf_time.csv''') , train_memory_csv_file=os.path.join(UpperCAmelCase__ , '''train_mem.csv''') , inference_memory_csv_file=os.path.join(UpperCAmelCase__ , '''inf_mem.csv''') , train_time_csv_file=os.path.join(UpperCAmelCase__ , '''train_time.csv''') , env_info_csv_file=os.path.join(UpperCAmelCase__ , '''env.csv''') , multi_process=UpperCAmelCase__ , )
A__ = PyTorchBenchmark(UpperCAmelCase__)
benchmark.run()
self.assertTrue(Path(os.path.join(UpperCAmelCase__ , '''inf_time.csv''')).exists())
self.assertTrue(Path(os.path.join(UpperCAmelCase__ , '''train_time.csv''')).exists())
self.assertTrue(Path(os.path.join(UpperCAmelCase__ , '''inf_mem.csv''')).exists())
self.assertTrue(Path(os.path.join(UpperCAmelCase__ , '''train_mem.csv''')).exists())
self.assertTrue(Path(os.path.join(UpperCAmelCase__ , '''env.csv''')).exists())
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->str:
'''simple docstring'''
A__ = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(UpperCAmelCase__ : Tuple):
self.assertTrue(hasattr(UpperCAmelCase__ , '''sequential'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''cumulative'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''current'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''total'''))
with tempfile.TemporaryDirectory() as tmp_dir:
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(UpperCAmelCase__ , '''log.txt''') , log_print=UpperCAmelCase__ , trace_memory_line_by_line=UpperCAmelCase__ , multi_process=UpperCAmelCase__ , )
A__ = PyTorchBenchmark(UpperCAmelCase__)
A__ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
_check_summary_is_not_empty(result.train_summary)
self.assertTrue(Path(os.path.join(UpperCAmelCase__ , '''log.txt''')).exists())
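# A standalone sketch of the benchmark flow exercised by the tests above, assuming the
# public `transformers` benchmark utilities; the model id and sizes are illustrative only.
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
def quick_benchmark():
    args = PyTorchBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"],  # any small model id works here
        training=False,
        inference=True,
        sequence_lengths=[8],
        batch_sizes=[1],
        multi_process=False,
    )
    # run() returns an object carrying the time/memory result dicts the tests inspect
    return PyTorchBenchmark(args).run()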
| 14 |
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count, for each perimeter up to max_perimeter, the right triangles with integer sides."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets
def solution(max_perimeter: int = 1000) -> int:
    """Return the perimeter <= max_perimeter that admits the most integer right triangles."""
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]
if __name__ == "__main__":
print(f'Perimeter {solution()} has maximum solutions')
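    # Quick self-check (a sketch): with max_perimeter=12 the only integer right triangle
    # is (3, 4, 5), whose perimeter is 12, so exactly one perimeter is counted.
    assert pythagorean_triple(12) == Counter({12: 1})
    assert solution(12) == 12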
| 111 | 0 |
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """CLIP tokenizer that can expand one placeholder token into several learned sub-tokens."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}
    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )
    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent."
                )
        self.token_map[placeholder_token] = output
    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text
    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
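# Usage sketch (not part of the original file; the pretrained id and placeholder name
# below are assumptions for illustration only):
# tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
# tokenizer.add_placeholder_tokens("<my-concept>", num_vec_per_token=4)
# ids = tokenizer.encode("a photo of <my-concept>")  # the placeholder expands to 4 sub-tokens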
| 51 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
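# Note on the pattern (illustrative, not part of the transformers file): at import time only
# `_import_structure` is built and `sys.modules[__name__]` is swapped for a `_LazyModule`, so
# `from transformers import MobileNetV2Model` only imports the heavy modeling submodule on
# first attribute access. A minimal sketch of the same idea with PEP 562 module-level
# __getattr__ (an alternative mechanism, not what `_LazyModule` actually does internally):
# def __getattr__(name):
#     import importlib
#     for module_name, names in _import_structure.items():
#         if name in names:
#             return getattr(importlib.import_module("." + module_name, __name__), name)
#     raise AttributeError(name)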
| 51 | 1 |
'''simple docstring'''
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
| 267 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
    'processing_layoutlmv2': ['LayoutLMv2Processor'],
    'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_layoutlmv2_fast'] = ['LayoutLMv2TokenizerFast']
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_layoutlmv2'] = ['LayoutLMv2FeatureExtractor']
    _import_structure['image_processing_layoutlmv2'] = ['LayoutLMv2ImageProcessor']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_layoutlmv2'] = [
        'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
        'LayoutLMv2ForQuestionAnswering',
        'LayoutLMv2ForSequenceClassification',
        'LayoutLMv2ForTokenClassification',
        'LayoutLMv2Layer',
        'LayoutLMv2Model',
        'LayoutLMv2PreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 253 | 0 |
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    """Video classification pipeline: samples frames from a clip with decord and classifies them."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)
    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__(self, videos, **kwargs):
        return super().__call__(videos, **kwargs)
    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames
        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)
        videoreader = VideoReader(video)
        videoreader.seek(0)
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)
        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)
        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
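# Usage sketch (not part of the original file; the model id is an assumption for
# illustration): the pipeline fetches the clip, samples `num_frames` frames with
# decord, and returns the top-k class labels.
# from transformers import pipeline
# video_classifier = pipeline("video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics")
# video_classifier("https://example.com/clip.mp4", top_k=3)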
| 67 |
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial (coefficients ordered from lowest degree) at x, term by term."""
    return sum(c * (x**i) for i, c in enumerate(poly))
def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's method: one multiply-add per coefficient."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
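    # Worked check (a sketch): the tuple above encodes 7x^4 + 9.3x^3 + 5x^2; Horner
    # evaluates it as (((7*x + 9.3)*x + 5)*x + 0)*x + 0, and at x = 10 both methods
    # give 500 + 9300 + 70000 = 79800.0 (up to float rounding).
    import math
    assert math.isclose(evaluate_poly(poly, x), 79800.0)
    assert math.isclose(horner(poly, x), 79800.0)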
| 67 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 91 |
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A minimal vector class supporting addition, subtraction, scalar and dot products."""
    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)
    def __len__(self) -> int:
        return len(self.__components)
    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"
    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception('must have the same size')
    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception('must have the same size')
    @overload
    def __mul__(self, other: float) -> Vector:
        ...
    @overload
    def __mul__(self, other: Vector) -> float:
        ...
    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception('invalid operand!')
    def copy(self) -> Vector:
        return Vector(self.__components)
    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception('index out of range')
    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value
    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception('Vector is empty')
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))
    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension: int) -> Vector:
    assert isinstance(dimension, int)
    return Vector([0] * dimension)
def unit_basis_vector(dimension: int, pos: int) -> Vector:
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)
def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y
def random_vector(n: int, a: int, b: int) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    """A minimal matrix class with addition, subtraction, products and determinants."""
    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h
    def __str__(self) -> str:
        ans = ''
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans
    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception('matrix must have the same dimension!')
    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception('matrices must have the same dimension!')
    @overload
    def __mul__(self, other: float) -> Matrix:
        ...
    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...
    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    'vector must have the same size as the '
                    'number of columns of the matrix!')
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None
    def height(self) -> int:
        return self.__height
    def width(self) -> int:
        return self.__width
    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception('component: indices out of bounds')
    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception('change_component: indices out of bounds')
    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception('Matrix is not square')
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()
    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception('Matrix is not square')
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception('Indices out of bounds')
    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception('Matrix is not square')
        if self.__height < 1:
            raise Exception('Matrix has no element')
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def square_zero_matrix(n: int) -> Matrix:
    matrix = [[0] * n for _ in range(n)]
    return Matrix(matrix, n, n)
def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
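# Small demo (a sketch; the names are the ones defined above): dot product via __mul__,
# a unit basis vector, and an axpy update s*x + y.
if __name__ == "__main__":
    x = Vector([1, 2, 3])
    y = Vector([3, 2, 1])
    print(x * y)  # 1*3 + 2*2 + 3*1 = 10
    print(axpy(2, x, y))  # (2*1+3, 2*2+2, 2*3+1) = (5,6,7)
    print(unit_basis_vector(3, 0))  # (1,0,0)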
| 141 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_clipseg': [
        'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'CLIPSegConfig',
        'CLIPSegTextConfig',
        'CLIPSegVisionConfig',
    ],
    'processing_clipseg': ['CLIPSegProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_clipseg'] = [
        'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST',
        'CLIPSegModel',
        'CLIPSegPreTrainedModel',
        'CLIPSegTextModel',
        'CLIPSegVisionModel',
        'CLIPSegForImageSegmentation',
    ]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 367 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after shortest-edge resizing."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
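# Worked example of the resize rule tested above (a sketch): a 480x640 COCO image with
# size {"shortest_edge": 800} maps the short side to 800 and scales the long side
# proportionally, int(800 * 640 / 480) = 1066, matching the expected [800, 1066] shape.
def _expected_resize(height, width, shortest_edge=800):
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge
assert _expected_resize(480, 640) == (800, 1066)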
| 183 | 0 |
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1_024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
) -> Dict:
    """Run evaluation on part of the data for one gpu and save to {save_dir}/rank_{rank}_output.json"""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend='''nccl''' , rank=local_rank)
    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"""rank_{local_rank}_output.json""")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop('''num_beams''' , model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""")  # if this is wrong, check config.model_type.
    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config , '''prefix''' , '''''') or ''''''
    ds = Seq2SeqDataset(
        tokenizer, data_dir, max_source_length, max_target_length=1_024, type_path=type_path, n_obs=n_obs, prefix=prefix, **dataset_kwargs, )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch['''input_ids'''].to(model.device) , attention_mask=batch['''attention_mask'''].to(model.device) , num_return_sequences=num_return_sequences , num_beams=num_beams , **generate_kwargs , )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch['''ids''']
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({'''pred''': pred, '''id''': ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
def run_generate():
    parser = argparse.ArgumentParser(
        epilog='''Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate''')
    parser.add_argument('''--data_dir''' , type=str , help='''like cnn_dm/test.source''')
    parser.add_argument(
        '''--model_name''' , type=str , help='''like facebook/bart-large-cnn,t5-base, etc.''' , default='''sshleifer/distilbart-xsum-12-3''' , )
    parser.add_argument('''--save_dir''' , type=str , help='''where to save''' , default='''tmp_gen''')
    parser.add_argument('''--max_source_length''' , type=int , default=None)
    parser.add_argument(
        '''--type_path''' , type=str , default='''test''' , help='''which subset to evaluate typically train/val/test''')
    parser.add_argument('''--task''' , type=str , default='''summarization''' , help='''used for task_specific_params + metrics''')
    parser.add_argument('''--bs''' , type=int , default=8 , required=False , help='''batch size''')
    parser.add_argument(
        '''--local_rank''' , type=int , default=-1 , required=False , help='''should be passed by distributed.launch''')
    parser.add_argument(
        '''--n_obs''' , type=int , default=None , required=False , help='''How many observations. Defaults to all.''')
    parser.add_argument(
        '''--num_return_sequences''' , type=int , default=1 , required=False , help='''How many sequences to return''')
    parser.add_argument(
        '''--sync_timeout''' , type=int , default=600 , required=False , help='''How long should master process wait for other processes to finish.''' , )
    parser.add_argument('''--src_lang''' , type=str , default=None , required=False)
    parser.add_argument('''--tgt_lang''' , type=str , default=None , required=False)
    parser.add_argument(
        '''--prefix''' , type=str , required=False , default=None , help='''will be added to the begininng of src examples''')
    parser.add_argument('''--fp16''' , action='''store_true''')
    parser.add_argument('''--debug''' , action='''store_true''')
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"""parsed the following generate kwargs: {generate_kwargs}""")
    json_save_dir = Path(args.save_dir + '''_tmp''')
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob('''rank_*.json'''))
    if intermediate_files:
        raise ValueError(f"""Found files at {json_save_dir} please move or remove them.""")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs['''src_lang'''] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs['''tgt_lang'''] = args.tgt_lang
    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir , json_save_dir , args.model_name , type_path=args.type_path , bs=args.bs , fp16=args.fp16 , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=dataset_kwargs , **generate_kwargs , )
    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath('''pseudolabel_results.json''')
            print(f"""Saving aggregated results at {save_path}, intermediate in {json_save_dir}/""")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + '''.target''')
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]
        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = '''translation''' in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = '''bleu''' if calc_bleu else '''rouge'''
        metrics: Dict = score_fn(preds, labels)
        metrics['''n_obs'''] = len(preds)
        runtime = time.time() - start_time
        metrics['''seconds_per_sample'''] = round(runtime / metrics['''n_obs'''] , 4)
        metrics['''n_gpus'''] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"""{args.type_path}_{metric_name}.json""")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds , save_dir.joinpath(f"""{args.type_path}_generations.txt"""))
        if args.debug:
            write_txt_file(labels , save_dir.joinpath(f"""{args.type_path}.target"""))
    else:
        shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> List:
    """Concatenate partial results into one list, then sort it by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records , key=lambda x: x["id"])
    preds = [x['''pred'''] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info('''waiting for all nodes to finish''')
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob('''rank_*.json'''))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError('''Rank 0 gave up on waiting for other processes''')
    # Unreachable
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
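# Example launch (a sketch; GPU count, model id and paths are placeholders):
#   python -m torch.distributed.launch --nproc_per_node=8 run_distributed_eval.py \
#       --model_name sshleifer/distilbart-xsum-12-3 \
#       --save_dir xsum_generations \
#       --data_dir xsum \
#       --fp16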
| 182 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    url = F"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url , headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span" , attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span" , attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span" , attrs={"class": "a-price a-text-price"}).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace("," , ""))
                            - float(product_price.strip("₹").replace("," , ""))
                        )
                        / float(product_mrp.strip("₹").replace("," , ""))
                    )
                    * 100)
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
A = " "
A = " "
data_frame.index += 1
return data_frame
if __name__ == "__main__":
    product = 'headphones'
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
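# Worked example of the discount formula used above (illustrative numbers): with an MRP
# of ₹50,000 and a current price of ₹42,500, (50000 - 42500) / 50000 * 100 = 15.0 percent.
def _discount(mrp: float, price: float) -> float:
    return (mrp - price) / mrp * 100
assert round(_discount(50_000, 42_500), 2) == 15.0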
| 292 | 0 |
'''simple docstring'''
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)
    def test_ops_cpu(self):
        debug_launcher(test_ops.main)
| 229 |
'''simple docstring'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
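# Typical entry points re-exported by this module (a sketch; the dataset name is
# illustrative only):
# from datasets import load_dataset
# train_split = load_dataset("squad", split="train")
# train_split[0]  # -> dict keyed by the features declared by the dataset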
| 229 | 1 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize, sigma, theta, lambd, gamma, psi):
    """Build a (ksize x ksize) Gabor filter kernel for the given orientation and scale."""
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)
    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)
    return gabor
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
imshow("Original", gray)
imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
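    # Kernel-shape sanity check (a sketch): even sizes are bumped to the next odd value
    # inside gabor_filter_kernel, so a requested 10x10 kernel comes back 11x11 with a
    # well-defined center pixel.
    assert gabor_filter_kernel(10, 8, 0, 10, 0, 0).shape == (11, 11)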
| 342 |
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
SCREAMING_SNAKE_CASE : List[str] = getLogger(__name__)
def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
) -> Dict:
    """Run evaluation on part of the data for one gpu and save to {save_dir}/rank_{rank}_output.json"""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name",
        type=str,
        help="like facebook/bart-large-cnn,t5-base, etc.",
        default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout",
        type=int,
        default=600,
        required=False,
        help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics: Dict = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
    else:
        shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> List:
    """Concatenate partial results into one list, then sort it by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    """Wait for the rank_*.json files from every replica, then load them all."""
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
# Unreachable
if __name__ == "__main__":
# Usage for MT:
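    # Hypothetical launch command (flags are illustrative; the launcher injects --local_rank):
    #   python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
    #       --model_name sshleifer/distilbart-xsum-12-3 --data_dir cnn_dm --save_dir tmp_gen --bs 8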
run_generate()
| 102 | 0 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
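# Usage note (sketch): instantiating the shim emits the FutureWarning above but otherwise
# behaves exactly like GLPNImageProcessor, e.g.
#   feature_extractor = GLPNFeatureExtractor()  # warns once, then preprocesses as usual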
| 370 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') )
rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') )
rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') )
rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.stages.{i}.{j}.gamma', F'backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter') )
rename_keys.append((F'backbone.stages.{i}.{j}.depthwise_conv.weight', F'backbone.encoder.stages.{i}.layers.{j}.dwconv.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.depthwise_conv.bias', F'backbone.encoder.stages.{i}.layers.{j}.dwconv.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.norm.weight', F'backbone.encoder.stages.{i}.layers.{j}.layernorm.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.norm.bias', F'backbone.encoder.stages.{i}.layers.{j}.layernorm.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv1.weight', F'backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv1.bias', F'backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv2.weight', F'backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv2.bias', F'backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias') )
if i > 0:
rename_keys.append((F'backbone.downsample_layers.{i}.0.weight', F'backbone.encoder.stages.{i}.downsampling_layer.0.weight') )
rename_keys.append((F'backbone.downsample_layers.{i}.0.bias', F'backbone.encoder.stages.{i}.downsampling_layer.0.bias') )
rename_keys.append((F'backbone.downsample_layers.{i}.1.weight', F'backbone.encoder.stages.{i}.downsampling_layer.1.weight') )
rename_keys.append((F'backbone.downsample_layers.{i}.1.bias', F'backbone.encoder.stages.{i}.downsampling_layer.1.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)

    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]]
        )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]]
        )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]]
        )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-convnext-tiny""",
type=str,
choices=[F"""upernet-convnext-{size}""" for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]],
help="""Name of the ConvNext UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 225 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}


class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
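# Usage sketch (values are illustrative): any default above can be overridden by keyword,
# and the ONNX config wraps a model config:
#   config = BeitConfig(image_size=384, drop_path_rate=0.2)
#   onnx_config = BeitOnnxConfig(config)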
| 45 |
"""simple docstring"""
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    """Base class from which `.generate()` streamers should inherit."""

    def put(self, value):
        """Function that is called by `.generate()` to push new tokens"""
        raise NotImplementedError()

    def end(self):
        """Function that is called by `.generate()` to signal the end of generation"""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        """Receives tokens, decodes them, and prints them to stdout as soon as they form entire words."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        """Flushes any remaining cache."""
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Prints the new text to stdout. If the stream is ending, also prints a newline."""
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True

        return False


class TextIteratorStreamer(TextStreamer):
    def __init__(
        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
    ):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Put the new text in the queue. If the stream is ending, also put a stop signal in the queue."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
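# Usage sketch (model/tokenizer are stand-ins): TextIteratorStreamer is meant to be consumed
# from a second thread while `generate` runs, e.g.
#   from threading import Thread
#   streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
#   Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=64)).start()
#   for new_text in streamer:
#       print(new_text, end="")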
| 256 | 0 |
"""simple docstring"""
def generate_large_matrix() -> list[list[int]]:
    """Create a 1000 x 1000 grid whose rows and columns are sorted in decreasing order."""
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Binary-search for the index of the first negative number in a decreasing array."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives by shrinking the per-row search bound with binary search."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total
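# Worked example (hand-checked): for [[4, 3, -1], [2, -2, -3]] the per-row bound shrinks from
# 3 to 2 and then to 1, so the count is 2 * 3 - (2 + 1) = 3 negatives.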
def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives with a flat scan of every cell."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Scan each row, but stop at the first negative: the rest of the row must also be negative."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark the three implementations against each other."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 370 |
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values


X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)


poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


def viz_polymonial():
    """Visualize the polynomial regression results."""
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 234 | 0 |
"""simple docstring"""
import random
class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt text with per-character pseudo-random keys."""
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Decrypt a cipher produced by `encrypt` using the matching key list."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)
if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
print(c, k)
print(Onepad().decrypt(c, k))
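    # Round-trip sanity check (sketch): decryption inverts encryption for any input string,
    # since c = (p + k) * k implies p = (c - k**2) / k.
    #   assert Onepad().decrypt(*Onepad().encrypt("Hello")) == "Hello"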
| 45 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
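# Mechanism note (sketch): because sys.modules[__name__] is replaced with a _LazyModule,
# a statement like `from <this package> import XGLMModel` only triggers the real import of
# the corresponding submodule on first attribute access, keeping the package import cheap.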
| 45 | 1 |
"""simple docstring"""
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        """Insert a list of words into the Trie."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a single word into the Trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        """Return True if `word` was previously inserted into the Trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        """Delete a word from the Trie, pruning nodes that become unused."""

        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")

    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    """Diagnose the trie and ensure all is well."""
    print_results("Testing trie functionality", test_trie())
if __name__ == "__main__":
main()
| 363 |
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate entropy of a pre-softmax logit Tensor"""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
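# Sanity check (hand-derived): for a uniform two-logit row, x = torch.zeros(1, 2), we get
# A = 2 and B = 0, so entropy(x) = log(2) ≈ 0.6931 -- the maximum for two classes.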
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)

        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!


class BertHighway(nn.Module):
    """A highway (early-exit) head: pool one intermediate layer's output and classify it."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 268 | 0 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite at feature extractor specific tests
    feat_extract_tester = None
    feature_extraction_class = None

    @property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()

    def test_feat_extract_common_properties(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, "feature_size"))
        self.assertTrue(hasattr(feat_extract, "sampling_rate"))
        self.assertTrue(hasattr(feat_extract, "padding_value"))
    def test_batch_feature(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
    @require_torch
    def test_batch_feature_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
    @require_tf
    def test_batch_feature_tf(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="tf")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
    def _check_padding(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size

        # test padding for List[int] + numpy
        input_1 = feat_extract.pad(processed_features, padding=False)
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="longest")
        input_2 = input_2[input_name]

        input_3 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[-1]))
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(processed_features, padding="longest", return_tensors="np")
        input_4 = input_4[input_name]

        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length")[input_name]

        input_5 = feat_extract.pad(
            processed_features, padding="max_length", max_length=pad_max_length, return_tensors="np"
        )
        input_5 = input_5[input_name]

        self.assertFalse(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_2))
        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(_inputs_are_equal(input_2, input_3))
        self.assertTrue(len(input_1[0]) == pad_min_length)
        self.assertTrue(len(input_1[1]) == pad_min_length + pad_diff)
        self.assertTrue(input_4.shape[:2] == (batch_size, len(input_3[0])))
        self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length))

        if feature_size > 1:
            self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size)

        # test padding for `pad_to_multiple_of` for List[int] + numpy
        input_6 = feat_extract.pad(processed_features, pad_to_multiple_of=10)
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(processed_features, padding="longest", pad_to_multiple_of=10)
        input_7 = input_7[input_name]

        input_8 = feat_extract.pad(
            processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            pad_to_multiple_of=10,
            max_length=pad_max_length,
            return_tensors="np",
        )
        input_9 = input_9[input_name]

        self.assertTrue(all(len(x) % 10 == 0 for x in input_6))
        self.assertTrue(_inputs_are_equal(input_6, input_7))

        expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_8))
        self.assertEqual(input_9.shape[:2], (batch_size, expected_mult_pad_length))

        if feature_size > 1:
            self.assertTrue(input_9.shape[2] == feature_size)

        # Check padding value is correct
        padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_2[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_2[1])[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_2[2])[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(input_5[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3
        )
        self.assertTrue(
            abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
            < 1e-3
        )
    def _check_truncation(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        # truncate to smallest
        input_1 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True
        )
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0]))
        input_2 = input_2[input_name]

        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))

        # truncate to smallest with np
        input_3 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            return_tensors="np",
            truncation=True,
        )
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np"
        )
        input_4 = input_4[input_name]

        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(input_3.shape[1] == len(speech_inputs[0]))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_4))

        # truncate to middle
        input_5 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[1]),
            truncation=True,
            return_tensors="np",
        )
        input_5 = input_5[input_name]

        input_6 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True
        )
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np"
        )
        input_7 = input_7[input_name]

        self.assertTrue(input_5.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(input_5))
        self.assertTrue(_inputs_have_equal_length(input_6))
        self.assertTrue(_inputs_are_equal(input_5, input_6))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_7))
        self.assertTrue(len(input_7[-1]) == len(speech_inputs[-1]))

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]

        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_8 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
            truncation=True,
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
        )
        input_9 = input_9[input_name]

        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of

        self.assertTrue(len(input_8[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(input_8))
        self.assertFalse(_inputs_have_equal_length(input_9))
    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)
    @require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
@require_tf
def lowercase_ ( self : Dict ) ->Dict:
snake_case__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
snake_case__ : List[str] = self.feat_extract_tester.prepare_inputs_for_common()
snake_case__ : Union[str, Any] = feat_extract.model_input_names[0]
snake_case__ : List[Any] = BatchFeature({input_name: speech_inputs} )
snake_case__ : List[str] = feat_extract.pad(_snake_case, padding='longest', return_tensors='np' )[input_name]
snake_case__ : Any = feat_extract.pad(_snake_case, padding='longest', return_tensors='tf' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def lowercase_ ( self : Optional[int] ) ->List[str]:
snake_case__ : List[Any] = self.feat_extract_dict
snake_case__ : List[Any] = True
snake_case__ : Union[str, Any] = self.feature_extraction_class(**_snake_case )
snake_case__ : str = self.feat_extract_tester.prepare_inputs_for_common()
snake_case__ : List[str] = [len(_snake_case ) for x in speech_inputs]
snake_case__ : List[Any] = feat_extract.model_input_names[0]
snake_case__ : Optional[Any] = BatchFeature({input_name: speech_inputs} )
snake_case__ : Optional[Any] = feat_extract.pad(_snake_case, padding='longest', return_tensors='np' )
self.assertIn('attention_mask', _snake_case )
self.assertListEqual(list(processed.attention_mask.shape ), list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist(), _snake_case )
def lowercase_ ( self : Optional[Any] ) ->Tuple:
snake_case__ : Optional[Any] = self.feat_extract_dict
snake_case__ : Any = True
snake_case__ : Optional[Any] = self.feature_extraction_class(**_snake_case )
snake_case__ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_common()
snake_case__ : Optional[int] = [len(_snake_case ) for x in speech_inputs]
snake_case__ : int = feat_extract.model_input_names[0]
snake_case__ : str = BatchFeature({input_name: speech_inputs} )
snake_case__ : Optional[int] = min(_snake_case )
snake_case__ : List[str] = feat_extract.pad(
_snake_case, padding='max_length', max_length=_snake_case, truncation=_snake_case, return_tensors='np' )
self.assertIn('attention_mask', _snake_case )
self.assertListEqual(
list(processed_pad.attention_mask.shape ), [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist(), [max_length for x in speech_inputs] )
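# --- Added standalone example (separate from the test mixin above) ---
# A minimal, hedged sketch of the pad/truncation behavior these tests exercise.
# Assumes `transformers` with Wav2Vec2 available; all lengths are illustrative.
import numpy as np
from transformers import Wav2Vec2FeatureExtractor

feat_extract = Wav2Vec2FeatureExtractor()  # default config, no pretrained weights needed
speech_inputs = [np.random.rand(n).astype(np.float32) for n in (800, 1000, 1200)]

batch = feat_extract.pad(
    {"input_values": speech_inputs},
    padding="max_length",
    max_length=800,  # truncate everything down to the shortest input
    truncation=True,
    return_tensors="np",
)
print(batch["input_values"].shape)  # (3, 800)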
| 277 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    """Configuration class for the original OpenAI GPT (GPT-1) model."""

    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
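# --- Added standalone example ---
# Hedged usage sketch: the attribute_map above aliases the generic config names
# onto GPT-1's historical ones. Assumes an installed `transformers` package.
from transformers import OpenAIGPTConfig

config = OpenAIGPTConfig(n_layer=6, n_head=8)
print(config.num_hidden_layers)  # 6 -> resolved to n_layer via attribute_map
print(config.hidden_size)        # 768 -> resolved to n_embd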
| 277 | 1 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_layer=-1, train_highway=False):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds,
            )
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
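# --- Added standalone example ---
# Hedged sketch of early-exit inference with the classifier above. The config
# values and token ids are illustrative, and this assumes the DeeBERT example
# package (modeling_highway_bert) is importable alongside this module.
import torch
from transformers import RobertaConfig

config = RobertaConfig(num_labels=2)
model = DeeRobertaForSequenceClassification(config)
model.eval()

input_ids = torch.tensor([[0, 31414, 232, 2]])  # arbitrary token ids
with torch.no_grad():
    outputs = model(input_ids)
logits = outputs[0]
# In eval mode the tuple is extended with per-exit entropies and the exit layer.
(original_entropy, highway_entropy), exit_layer = outputs[-2], outputs[-1]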
| 367 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
    def create_and_check_xlm_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )

    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
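# --- Added standalone example ---
# Hedged sketch of what the slow test above exercises (checkpoint id taken from
# the test itself; downloading the weights requires network access):
import torch
from transformers import XLMTokenizer, XLMWithLMHeadModel

tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
input_ids = torch.tensor([[14, 447]])  # "the president"
output_ids = model.generate(input_ids, do_sample=False)
print(tokenizer.decode(output_ids[0]))  # greedy decoding just repeats the prompt here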
| 200 | 0 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    """Wraps a Pix2Struct image processor and a T5 tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_patches: Optional[int] = 2048,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
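# --- Added standalone example ---
# Hedged usage sketch (real checkpoint id; the image URL is illustrative and the
# call requires network access plus Pillow):
import requests
from PIL import Image
from transformers import Pix2StructProcessor

processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
image = Image.open(requests.get("https://www.ilankelman.org/stopsigns/australia.jpg", stream=True).raw)
inputs = processor(images=image, return_tensors="pt", max_patches=1024)
print(inputs.keys())  # flattened patches and attention mask for the image branch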
| 187 |
import math
class Graph:
    def __init__(self, n=0):  # a graph with nodes 0, 1, ..., n-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    print(graph.show_min(1, 4))  # print the returned distance so the demo shows output
    print(graph.show_min(0, 3))
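    # --- Added note ---
    # After floyd_warshall(), dp[i][i] holds the weight of the shortest cycle
    # through i (it starts at math.inf here, not 0). A hedged helper built on
    # that property (the function name is my own, not part of the original):
    def has_negative_cycle(g: Graph) -> bool:
        return any(g.dp[i][i] < 0 for i in range(g.n))

    print(has_negative_cycle(graph))  # False: the example uses only positive weights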
| 94 | 0 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Return data from the Open Library API for the given olid (e.g. "isbn/0140328726")."""
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    """Given Open Library book data, return a human-readable summary dict."""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
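# --- Added example ---
# Hedged, non-interactive usage sketch (hits the real Open Library API over the
# network; the ISBN is illustrative and resolves to Roald Dahl's "Matilda"):
def _demo() -> None:
    book = summarize_book(get_openlibrary_data("isbn/0140328726"))
    print(book["Title"])  # e.g. "Matilda"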
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")

        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f"Sorry, there are no results for ISBN: {isbn}.")
| 360 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )
    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])
    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
| 228 | 0 |
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
    )
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
    from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
        OnnxStableDiffusionInpaintPipeline,
        OnnxStableDiffusionInpaintPipelineLegacy,
        OnnxStableDiffusionPipeline,
        OnnxStableDiffusionUpscalePipeline,
        StableDiffusionOnnxPipeline,
    )
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
    from .models.controlnet_flax import FlaxControlNetModel
    from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
    from .pipelines import (
        FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
        FlaxStableDiffusionInpaintPipeline,
        FlaxStableDiffusionPipeline,
    )
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
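# --- Added standalone example ---
# Hedged sketch of why the guarded imports above matter: if torch is missing,
# `DDPMPipeline` resolves to a dummy object that raises a helpful error on use,
# instead of the whole package failing at import time. Checkpoint id is real;
# running this downloads weights.
from diffusers import DDPMPipeline

pipe = DDPMPipeline.from_pretrained("google/ddpm-cat-256")
image = pipe(num_inference_steps=25).images[0]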
| 11 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/mbart-large-en-ro': 10_24,
'facebook/mbart-large-cc25': 10_24,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizerFast(PreTrainedTokenizerFast):
    """Fast MBART tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts, src_lang="en_XX", tgt_texts=None, tgt_lang="ro_RO", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset special tokens to the source-language setting: no prefix, suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        """Reset special tokens to the target-language setting: no prefix, suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
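# --- Added standalone example ---
# Hedged usage sketch (real MBart checkpoint id; downloads the tokenizer files):
from transformers import MBartTokenizerFast

tokenizer = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
# Because of set_src_lang_special_tokens, input_ids end with [</s>, en_XX code].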
| 11 | 1 |
"""simple docstring"""
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    # Numerically stable softmax over the last axis.
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 357 |
"""simple docstring"""
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)


def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files in `artifact_dir`."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
    args = parser.parse_args()

    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('''=''' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
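    # An illustrative invocation of this script (the run id, token, and paths
    # below are hypothetical placeholders, not values from a real CI run):
    #
    #   python extract_warnings.py --workflow_run_id 1234567890 \
    #       --output_dir ./ci_artifacts --token $GITHUB_TOKEN \
    #       --targets DeprecationWarning,UserWarning,FutureWarning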
| 172 | 0 |
'''simple docstring'''
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
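    # A hand-checked example (illustrative): value/weight ratios are
    # (60/10, 100/20, 120/30); a capacity of 50 takes the first two items
    # whole plus 20/30 of the third, i.e. 60 + 100 + 80 = 240.
    assert frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240.0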
| 223 |
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(*, aliases=None, help=None, default=dataclasses.MISSING, default_factory=dataclasses.MISSING,
          metadata=None, **kwargs) -> dataclasses.Field:
    if metadata is None:
        # Important: don't use a dict as default param in the signature because dict is mutable and shared across calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """An `argparse.ArgumentParser` subclass that uses type hints on dataclasses to generate arguments."""

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types, **kwargs):
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser, field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default")

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'.")
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)")
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`.") from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(self, args=None, return_remaining_strings=False, look_for_args_file=True,
                                    args_filename=None, args_file_flag=None):
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")
            return (*outputs,)
    def parse_dict(self, args, allow_extra_keys=False):
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file, allow_extra_keys=False):
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file, allow_extra_keys=False):
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
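# A minimal usage sketch of the parser restored above; the dataclass, field
# names, and command line below are illustrative assumptions, not part of the
# original module.
if __name__ == "__main__":

    @dataclasses.dataclass
    class ExampleArguments:
        model_name: str = HfArg(default="some-model", help="Hypothetical model identifier.")
        batch_size: int = 8
        fp16: bool = False

    parser = HfArgumentParser(ExampleArguments)
    (example_args,) = parser.parse_args_into_dataclasses(args=["--batch_size", "16", "--fp16", "true"])
    assert example_args.batch_size == 16 and example_args.fp16 is True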
| 179 | 0 |
from math import ceil, sqrt
def solution(limit: int = 1_000_000) -> int:
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer
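# Sanity check taken from the Project Euler 173 statement: using up to one
# hundred tiles, forty-one different square laminae can be formed.
def _pe_173_self_check() -> None:
    assert solution(100) == 41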
if __name__ == "__main__":
    print(F"{solution() = }")
 | 350 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list that only accepts edge weights of 0 or 1 (for 0-1 BFS)."""

    def __init__(self, size):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex):
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex, to_vertex, weight):
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex, finish_vertex):
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]
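# A minimal usage sketch of the structure above; the three-vertex graph is
# illustrative, not from the original source.
def _zero_one_bfs_demo() -> None:
    graph = AdjacencyList(3)
    graph.add_edge(0, 1, 0)  # zero-weight edge
    graph.add_edge(1, 2, 1)  # unit-weight edge
    assert graph.get_shortest_path(0, 2) == 1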
if __name__ == "__main__":
import doctest
    doctest.testmod()
 | 284 | 0 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
a ="""\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
"""
a ="""\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""
a ="""
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"precision\": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'precision@10': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
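# A toy self-check of the helper above (illustrative): with identical English
# and Indic sentence vectors, each row's nearest neighbour is itself, so the
# precision@10 score is 1.0.
def _precision_at_10_self_check() -> None:
    rng = np.random.default_rng(0)
    vecs = rng.random((32, 8))
    assert precision_at_10(vecs, vecs) == 1.0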
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]')
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('int64')
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32')),
'references': datasets.Value('int64')
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32')),
}) ,codebase_urls=[] ,reference_urls=[] ,format='numpy' if self.config_name != 'cvit-mkb-clsr' else None ,)
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]')
| 73 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
"₹"
+ item.find(
"span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
)
except AttributeError:
A = ""
try:
                discount = float(
(
(
float(product_mrp.strip("₹" ).replace("," , "" ) )
- float(product_price.strip("₹" ).replace("," , "" ) )
)
/ float(product_mrp.strip("₹" ).replace("," , "" ) )
)
* 100 )
except ValueError:
A = float("nan" )
except AttributeError:
pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
    data_frame.loc[
        data_frame["Current Price of the product"] > data_frame["MRP of the product"], "MRP of the product"
    ] = " "
    data_frame.loc[data_frame["MRP of the product"] == "", "Discount"] = " "
    data_frame.index += 1
    return data_frame
return data_frame
if __name__ == "__main__":
    product = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 292 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2],
                 num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0,
                 attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu",
                 use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5,
                 out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
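# A quick self-check of the derived attributes above (a sketch using the class
# defaults; the arithmetic mirrors the formula in `__init__`):
if __name__ == "__main__":
    config = MaskFormerSwinConfig()
    assert config.hidden_size == 96 * 2 ** 3  # embed_dim * 2 ** (len(depths) - 1)
    assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]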
| 77 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True,
                 use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2,
                 num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1,
                 max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)
    def get_config(self):
return BlipTextConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,projection_dim=self.projection_dim ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,dropout=self.dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,bos_token_id=self.bos_token_id ,)
    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 77 | 1 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None,
                 stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None,
                 return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False,
                 return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
            max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose,
            return_tensors=return_tensors, **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)
        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
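# A minimal usage sketch (kept as comments to avoid import-time work); the
# checkpoint name, image path, and question are illustrative assumptions:
#
#     from PIL import Image
#     processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#     inputs = processor(images=Image.open("photo.jpg"), text="How many cats are there?", return_tensors="pt")
#     # `inputs` now holds input_ids/attention_mask plus pixel_values and pixel_mask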
| 28 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self
    def loader_batch_item(self):
        # Return the item located at `_loader_batch_index` within the current `_loader_batch_data`.
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result
    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # Subiterator None means we haven't started a `preprocess` iterator, so start it.
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.

        # This iterator accumulates items (possibly while unbatching) until it
        # hits `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator
class KeyDataset(Dataset):
    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]
class KeyPairDataset(Dataset):
    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 291 | 0 |
'''simple docstring'''
def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by the magnitude of its first term --> creates a 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set
return final_set
def solve_simultaneous(equations: list[list]) -> list:
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
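# A hand-checked worked example of the solver above (illustrative): the system
# 2x + y = 4, x + 2y = 5 has the unique solution x = 1, y = 2.
def _solver_self_check() -> None:
    assert solve_simultaneous([[2, 1, 4], [1, 2, 5]]) == [1.0, 2.0]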
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 243 |
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # For testing, an iterable dataset of random length
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(self, dataset, seed, batch_size, drop_last=False, num_processes=2,
                                      split_batches=False):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes,
                process_index=i, split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
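    # Illustrative sketch (not part of the original tests): `skip_first_batches` is what lets
    # training resume mid-epoch. Assuming a prepared dataloader and a saved step counter
    # (`train_dataloader` and `resume_step` are hypothetical names):
    #
    #   skipped = skip_first_batches(train_dataloader, num_batches=resume_step)
    #   for batch in skipped:
    #       ...  # continues exactly where the checkpoint left off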
| 243 | 1 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowerCamelCase = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use YolosImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
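# Illustrative usage of the deprecated alias (hedged sketch; `image` is a placeholder PIL image):
#   feature_extractor = YolosFeatureExtractor()                     # emits the FutureWarning above
#   inputs = feature_extractor(images=image, return_tensors='pt')   # behaves like YolosImageProcessor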
| 188 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
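# Note on `collate_fn` above: dynamic ("longest") padding keeps GPU batches small, while TPUs
# prefer one fixed shape so that XLA does not recompile on every new sequence length; that is
# why the TPU branch pads to max_length=128.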
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load('glue', 'mrpc')
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch['labels']))  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions, references=references, )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}:', eval_metric)
        performance_metric[f'epoch-{epoch}'] = eval_metric['accuracy']

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric['accuracy']

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f'Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, 'all_results.json'), 'w') as f:
            json.dump(performance_metric, f)
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.')
    parser.add_argument(
        '--model_name_or_path', type=str, default='bert-base-cased', help='Path to pretrained model or model identifier from huggingface.co/models.', required=False, )
    parser.add_argument(
        '--output_dir', type=str, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.', )
    parser.add_argument(
        '--performance_lower_bound', type=float, default=None, help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.', )
    parser.add_argument(
        '--num_epochs', type=int, default=3, help='Number of train epochs.', )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
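# Example invocation (illustrative; the script and config file names below are hypothetical):
#   accelerate launch --config_file ds_config.yaml test_performance.py \
#       --model_name_or_path bert-base-cased --num_epochs 3 --performance_lower_bound 0.8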
if __name__ == "__main__":
main() | 74 | 0 |
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace('transformer_layers', 'layers')] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace('subsample', 'conv')] = s_dict.pop(key)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
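# Note: `make_linear_from_emb` ties the output projection to the input embedding matrix by
# reusing the same weight tensor inside an nn.Linear. A quick sanity check (illustrative only):
#   emb = nn.Embedding(10, 4)
#   assert torch.equal(make_linear_from_emb(emb).weight, emb.weight)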
def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    m2m_100 = torch.load(checkpoint_path, map_location='cpu')
    args = m2m_100['args']
    state_dict = m2m_100['model']
    lm_head_weights = state_dict['decoder.output_projection.weight']

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict['decoder.embed_tokens.weight'].shape[0]
    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(',')]
    config = Speech2TextConfig(
        vocab_size=vocab_size, max_source_positions=args.max_source_positions, max_target_positions=args.max_target_positions, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='relu', num_conv_layers=len(conv_kernel_sizes), conv_channels=args.conv_channels, conv_kernel_sizes=conv_kernel_sizes, input_feat_per_channel=args.input_feat_per_channel, input_channels=args.input_channels, tie_word_embeddings=tie_embeds, num_beams=5, max_length=200, use_cache=True, decoder_start_token_id=2, early_stopping=True, )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
            f''' but all the following weights are missing {missing}''')

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 327 |
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(self, complete_file_name, parser_only, secondary_filename=None, special_strings=None):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join('examples', 'by_feature'))
        examples_path = os.path.abspath('examples')
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name, feature_script=item, tested_section='main()' if parser_only else 'training_function()', ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename)
                        diff = '\n'.join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, '')
                        self.assertEqual(diff, '')
    def test_nlp_examples(self):
        self.one_complete_example('complete_nlp_example.py', True)
        self.one_complete_example('complete_nlp_example.py', False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join('examples', 'cv_example.py'))
        special_strings = [
            ' ' * 16 + '{\n\n',
            ' ' * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            ' ' * 20 + '"f1": eval_metric["f1"],\n\n',
            ' ' * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            ' ' * 20 + '"epoch": epoch,\n\n',
            ' ' * 16 + '},\n\n',
            ' ' * 16 + 'step=epoch,\n',
            ' ' * 12,
            ' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n',
        ]
        self.one_complete_example('complete_cv_example.py', True, cv_path, special_strings)
        self.one_complete_example('complete_cv_example.py', False, cv_path, special_strings)
@mock.patch.dict(os.environ, {'TESTING_MOCKED_DATALOADERS': '1'})
class FeatureExamplesTests(TempDirTestCase):
    clear_on_exit = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, 'default_config.yml')
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ['accelerate', 'launch', '--config_file', cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)
    def test_checkpointing_by_epoch(self):
        testargs = f'''
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        '''.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, 'epoch_0')))

    def test_checkpointing_by_steps(self):
        testargs = f'''
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        '''.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, 'step_2')))

    def test_load_states_by_epoch(self):
        testargs = f'''
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, 'epoch_0')}
        '''.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn('epoch 0:', output)
        self.assertIn('epoch 1:', output)

    def test_load_states_by_steps(self):
        testargs = f'''
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, 'step_2')}
        '''.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn('epoch 0:', output)
            self.assertIn('epoch 1:', output)
        else:
            self.assertIn('epoch 0:', output)
            self.assertIn('epoch 1:', output)

    @slow
    def test_cross_validation(self):
        testargs = '\n        examples/by_feature/cross_validation.py\n        --num_folds 2\n        '.split()
        with mock.patch.dict(os.environ, {'TESTING_MOCKED_DATALOADERS': '0'}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall('({.+})', output)
            results = [r for r in results if 'accuracy' in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results['accuracy'], 0.75)

    def test_multi_process_metrics(self):
        testargs = ['examples/by_feature/multi_process_metrics.py']
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f'''
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            '''.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, 'tracking')))

    def test_gradient_accumulation(self):
        testargs = ['examples/by_feature/gradient_accumulation.py']
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ['examples/by_feature/local_sgd.py']
        run_command(self._launch_args + testargs)
| 327 | 1 |
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
| 1 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        # Transfer the weights of `self.src` to `self.dest` by performing a forward pass using
        # `x` as input; under the hood we track all the operations in both modules.
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"""Numbers of operations are different. Source module has {len(src_traced)} operations while"""
                f""" destination module has {len(dest_traced)}.""")

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"""Transfered from={src_m} to={dest_m}""")
class FakeRegNetVisslWrapper(nn.Module):
    """
    Fake wrapper for RegNet that mimics what vissl does without the need to pass a config file.
    """

    def __init__(self, model: nn.Module):
        super().__init__()
        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(('conv1', model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith('block'), f"""Unexpected layer name {k}"""
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"""res{block_index}""", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x, out_feat_keys=None, feature_blocks=self._feature_blocks, )
class NameToFromModelFuncMap(dict):
    # A dictionary with some additional logic to return a function that creates the correct original model.
    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split('-')
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)
        return val


class NameToOurModelFuncMap(dict):
    # A dictionary with some additional logic to return the correct Hugging Face RegNet class reference.
    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"""Copied key={from_key} to={to_key}""")
    return to_state_dict
def convert_weight_and_push(
    name: str, from_model_func: Callable[[], nn.Module], our_model_func: Callable[[], nn.Module], config: RegNetConfig, save_directory: Path, push_to_hub: bool = True, ):
    print(f"""Converting {name}...""")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )
    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output

    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message="Add model", use_temp_dir=True, )

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message="Add image processor", use_temp_dir=True, )

        print(f"""Pushed {name}""")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = 'huggingface/label-files'
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset')), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="x" ),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type="x" ),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="x" ),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="x" ),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="x" ),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type="x" ),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type="x" ),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type="x" ),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type="x" ),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type="x" ),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type="x" ),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type="x" ),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
    # add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location='cpu')
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files['classy_state_dict']['base_model']['model']
        state_dict = model_state_dict['trunk']
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict['heads']
    # pretrained
    names_to_from_model_map['regnet-y-320-seer'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch', lambda: FakeRegNetVisslWrapper(RegNetY32gf()), )
    names_to_from_model_map['regnet-y-640-seer'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch', lambda: FakeRegNetVisslWrapper(RegNetY64gf()), )
    names_to_from_model_map['regnet-y-1280-seer'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch', lambda: FakeRegNetVisslWrapper(RegNetY128gf()), )
    names_to_from_model_map['regnet-y-10b-seer'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch', lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))), )
    # IN1K finetuned
    names_to_from_model_map['regnet-y-320-seer-in1k'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetY32gf()), )
    names_to_from_model_map['regnet-y-640-seer-in1k'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetY64gf()), )
    names_to_from_model_map['regnet-y-1280-seer-in1k'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetY128gf()), )
    names_to_from_model_map['regnet-y-10b-seer-in1k'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch', lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))), )
    if model_name:
        convert_weight_and_push(
            model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], names_to_config[model_name], save_directory, push_to_hub, )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], config, save_directory, push_to_hub, )
    return config, expected_shape
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported regnet* architecture,"""
""" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
__lowerCamelCase = parser.parse_args()
__lowerCamelCase = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 59 | 0 |
'''simple docstring'''
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
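# The DFS above is the classic Hopcroft-Tarjan approach: low[v] tracks the lowest vertex id
# reachable from v's subtree, so a full pass costs O(V + E). For the sample adjacency list
# below, removing vertex 2, 3 or 5 disconnects the graph, so the call should print 2, 3 and 5.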
# Adjacency list of graph
_SCREAMING_SNAKE_CASE : Optional[Any] = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
| 92 |
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA')
    def test_save_load_float16(self):
        # Due to non-determinism in save load, load hidden states have some small numerical differences
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2, )

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0', variant='fp16', torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            'DeepFloyd/IF-II-L-v1.0', variant='fp16', torch_dtype=torch.float16, text_encoder=None, tokenizer=None)

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to('cuda')
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt('anime turtle', device='cuda')

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, num_inference_steps=2, generator=generator, output_type='np', )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy')
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device='cpu').manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, generator=generator, num_inference_steps=2, output_type='np', )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy')
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, num_inference_steps=2, generator=generator, output_type='np', )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy')
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device='cpu').manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, original_image=original_image, generator=generator, num_inference_steps=2, output_type='np', )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy')
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, num_inference_steps=2, generator=generator, output_type='np', )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy')
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device='cpu').manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, original_image=original_image, generator=generator, num_inference_steps=2, output_type='np', )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy')
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
| 92 | 1 |
import math
def jump_search(arr: list, x: int) -> int:
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1

    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
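# Jump search requires a sorted array: it probes every floor(sqrt(n))-th element, then scans
# linearly inside the identified block, giving O(sqrt(n)) comparisons overall.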
if __name__ == "__main__":
UpperCamelCase_ = input('''Enter numbers separated by a comma:\n''').strip()
UpperCamelCase_ = [int(item) for item in user_input.split(''',''')]
UpperCamelCase_ = int(input('''Enter the number to be searched:\n'''))
UpperCamelCase_ = jump_search(arr, x)
if res == -1:
print('''Number not found!''')
else:
print(F"Number {x} is at index {res}")
| 345 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt")["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(
    rag_example_args: "RagExampleArguments", processing_args: "ProcessingArguments", index_hnsw_args: "IndexHnswArguments", ):
    logger.info("Step 1 - Create the dataset")
    ######################################
    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"])
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))})  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer), batched=True, batch_size=processing_args.batch_size, features=new_features, )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################
    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
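    # Note: `d` must match the DPR embedding width (768 for these checkpoints) and `m` is the
    # number of bi-directional links per HNSW node; METRIC_INNER_PRODUCT mirrors DPR's
    # dot-product scoring.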
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"), metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"}, )
    question: Optional[str] = field(
        default=None, metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."}, )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq", metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"}, )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base", metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        }, )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"), metadata={"help": "Path to a directory where the dataset passages and the index will be saved"}, )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None, metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        }, )
    batch_size: int = field(
        default=16, metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        }, )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768, metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."}, )
    m: int = field(
        default=128, metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        }, )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
| 345 | 1 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    """simple docstring"""
    data_dict = {
        """repo_name""": ["""test_repo1""", """test_repo2""", """test_repo3"""],
        """path""": ["""test_1.py""", """test_2.py""", """unit_test.py"""],
        """content""": ["""a """ * 20, """a """ * 30, """b """ * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["""copies"""], 2)
        self.assertEqual(duplicate_clusters[0][0]["""is_extreme"""], True)
| 337 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
    'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'ctrl': 256,
}

CONTROL_CODES = {
'Pregnancy': 16_8629,
'Christianity': 7675,
'Explain': 10_6423,
'Fitness': 6_3440,
'Saving': 6_3163,
'Ask': 2_7171,
'Ass': 9_5985,
'Joke': 16_3509,
'Questions': 4_5622,
'Thoughts': 4_9605,
'Retail': 5_2342,
'Feminism': 16_4338,
'Writing': 1_1992,
'Atheism': 19_2263,
'Netflix': 4_8616,
'Computing': 3_9639,
'Opinion': 4_3213,
'Alone': 4_4967,
'Funny': 5_8917,
'Gaming': 4_0358,
'Human': 4088,
'India': 1331,
'Joker': 7_7138,
'Diet': 3_6206,
'Legal': 1_1859,
'Norman': 4939,
'Tip': 7_2689,
'Weight': 5_2343,
'Movies': 4_6273,
'Running': 2_3425,
'Science': 2090,
'Horror': 3_7793,
'Confession': 6_0572,
'Finance': 1_2250,
'Politics': 1_6360,
'Scary': 19_1985,
'Support': 1_2654,
'Technologies': 3_2516,
'Teenage': 6_6160,
'Event': 3_2769,
'Learned': 6_7460,
'Notion': 18_2770,
'Wikipedia': 3_7583,
'Books': 6665,
'Extract': 7_6050,
'Confessions': 10_2701,
'Conspiracy': 7_5932,
'Links': 6_3674,
'Narcissus': 15_0425,
'Relationship': 5_4766,
'Relationships': 13_4796,
'Reviews': 4_1671,
'News': 4256,
'Translation': 2_6820,
'multilingual': 12_8406,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (the word is a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class CTRLTokenizer(PreTrainedTokenizer):
    """
    Construct a CTRL tokenizer, based on Byte-Pair-Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES
    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Greedily merge the lowest-ranked (earliest-learned) bigram until no known merges remain.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        # Drop the trailing "</w>" end-of-word marker.
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string into BPE sub-word tokens."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens back into a single string, removing the BPE "@@" markers."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 337 | 1 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Accelerator , __UpperCamelCase : int = 16 , __UpperCamelCase : str = "bert-base-cased" ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(__UpperCamelCase : Optional[Any] ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE__ = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__UpperCamelCase , max_length=__UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
SCREAMING_SNAKE_CASE__ = datasets.map(
__UpperCamelCase , batched=__UpperCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=__UpperCamelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE__ = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(__UpperCamelCase : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__UpperCamelCase , padding="""max_length""" , max_length=1_28 , return_tensors="""pt""" )
return tokenizer.pad(__UpperCamelCase , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE__ = DataLoader(
tokenized_datasets["""train"""] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = DataLoader(
tokenized_datasets["""validation"""] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
return train_dataloader, eval_dataloader
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[int] ) -> List[str]:
"""simple docstring"""
model.eval()
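    # samples_seen counts scored examples so duplicated examples in the final distributed batch can be dropped.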
SCREAMING_SNAKE_CASE__ = 0
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(__UpperCamelCase ) - 1:
SCREAMING_SNAKE_CASE__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
SCREAMING_SNAKE_CASE__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=__UpperCamelCase , references=__UpperCamelCase , )
SCREAMING_SNAKE_CASE__ = metric.compute()
return eval_metric["accuracy"]
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE__ = config["""lr"""]
SCREAMING_SNAKE_CASE__ = int(config["""num_epochs"""] )
SCREAMING_SNAKE_CASE__ = int(config["""seed"""] )
SCREAMING_SNAKE_CASE__ = int(config["""batch_size"""] )
SCREAMING_SNAKE_CASE__ = args.model_name_or_path
set_seed(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_dataloaders(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
SCREAMING_SNAKE_CASE__ = AutoModelForSequenceClassification.from_pretrained(__UpperCamelCase , return_dict=__UpperCamelCase )
# Instantiate optimizer
SCREAMING_SNAKE_CASE__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
SCREAMING_SNAKE_CASE__ = optimizer_cls(params=model.parameters() , lr=__UpperCamelCase )
if accelerator.state.deepspeed_plugin is not None:
SCREAMING_SNAKE_CASE__ = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = (len(__UpperCamelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
SCREAMING_SNAKE_CASE__ = get_linear_schedule_with_warmup(
optimizer=__UpperCamelCase , num_warmup_steps=0 , num_training_steps=__UpperCamelCase , )
else:
SCREAMING_SNAKE_CASE__ = DummyScheduler(__UpperCamelCase , total_num_steps=__UpperCamelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# We need to keep track of how many total steps we have iterated over
SCREAMING_SNAKE_CASE__ = 0
    # We also need to keep track of the starting epoch so files are named properly
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = evaluate.load("""glue""" , """mrpc""" )
SCREAMING_SNAKE_CASE__ = num_epochs
if args.partial_train_epoch is not None:
SCREAMING_SNAKE_CASE__ = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
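        # Recover the epoch number from a checkpoint folder named like "epoch_3".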
SCREAMING_SNAKE_CASE__ = args.resume_from_checkpoint.split("""epoch_""" )[1]
SCREAMING_SNAKE_CASE__ = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
SCREAMING_SNAKE_CASE__ = int(__UpperCamelCase ) + 1
SCREAMING_SNAKE_CASE__ = evaluation_loop(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
accelerator.print("""resumed checkpoint performance:""" , __UpperCamelCase )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , f"""state_{starting_epoch-1}.json""" ) , """r""" ) as f:
SCREAMING_SNAKE_CASE__ = json.load(__UpperCamelCase )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
SCREAMING_SNAKE_CASE__ = {}
for epoch in range(__UpperCamelCase , __UpperCamelCase ):
model.train()
for step, batch in enumerate(__UpperCamelCase ):
SCREAMING_SNAKE_CASE__ = model(**__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = outputs.loss
SCREAMING_SNAKE_CASE__ = loss / gradient_accumulation_steps
accelerator.backward(__UpperCamelCase )
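            # Step the optimizer/scheduler only once every gradient_accumulation_steps micro-batches.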
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
SCREAMING_SNAKE_CASE__ = f"""epoch_{epoch}"""
SCREAMING_SNAKE_CASE__ = os.path.join(args.output_dir , __UpperCamelCase )
accelerator.save_state(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = evaluation_loop(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE__ = accuracy
SCREAMING_SNAKE_CASE__ = lr_scheduler.get_lr()[0]
SCREAMING_SNAKE_CASE__ = optimizer.param_groups[0]["""lr"""]
SCREAMING_SNAKE_CASE__ = epoch
SCREAMING_SNAKE_CASE__ = overall_step
accelerator.print(f"""epoch {epoch}:""" , __UpperCamelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f"""state_{epoch}.json""" ) , """w""" ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=__UpperCamelCase , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=__UpperCamelCase , )
parser.add_argument(
"""--output_dir""" , type=__UpperCamelCase , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=__UpperCamelCase , default=__UpperCamelCase , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=__UpperCamelCase , default=__UpperCamelCase , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=__UpperCamelCase , default=2 , help="""Number of train epochs.""" , )
SCREAMING_SNAKE_CASE__ = parser.parse_args()
SCREAMING_SNAKE_CASE__ = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(__UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
main()
| 219 |
from numpy import exp, pi, sqrt


def gaussian(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Probability density of the normal distribution N(mu, sigma^2) evaluated at x."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 219 | 1 |
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)
        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()
        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    # Test one model quickly (not @slow) to catch simple problems.
    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)
    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        # Sweep two beam sizes and two length penalties.
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])
        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
| 368 |
"""simple docstring"""
from collections import namedtuple

import requests
from lxml import html  # type: ignore

covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    # The three "maincounter-number" divs hold total cases, deaths and recoveries, in that order.
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
| 163 | 0 |
"""simple docstring"""
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Return the week-day name of a given date, using Conway's Doomsday algorithm.

    >>> get_week_day(2020, 10, 24)
    'Saturday'
    """
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    # Anchor day of the century, e.g. Tuesday (2) for the 2000s.
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    # The week day on which this year's memorable "doomsday" dates fall.
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # A century year is a leap year only when it is divisible by 400.
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 347 |
"""simple docstring"""
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)
    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """For each prefix of `pattern`, the length of its longest proper prefix that is also a suffix."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
# Test 1)
__SCREAMING_SNAKE_CASE : Optional[int] = 'abc1abc12'
__SCREAMING_SNAKE_CASE : Optional[int] = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
__SCREAMING_SNAKE_CASE : List[str] = 'alskfjaldsk23adsfabcabc'
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
__SCREAMING_SNAKE_CASE : int = 'ABABX'
__SCREAMING_SNAKE_CASE : Optional[Any] = 'ABABZABABYABABX'
assert kmp(pattern, text)
# Test 3)
__SCREAMING_SNAKE_CASE : Any = 'AAAB'
__SCREAMING_SNAKE_CASE : List[Any] = 'ABAAAAAB'
assert kmp(pattern, text)
# Test 4)
__SCREAMING_SNAKE_CASE : Optional[int] = 'abcdabcy'
__SCREAMING_SNAKE_CASE : str = 'abcxabcdabxabcdabcdabcy'
assert kmp(pattern, text)
# Test 5)
__SCREAMING_SNAKE_CASE : Any = 'aabaabaaa'
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 347 | 1 |
"""simple docstring"""
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed TP/PP weight names to the transformers naming scheme."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks (Megatron layer indices are offset by 3 relative to "h.<n>")
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
def get_dtype_size(dtype):
    """Return the size in bytes of one element of `dtype` (bools are counted as one bit)."""
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict , __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : List[Any] ) -> List[str]:
# Construct model
if bloom_config_file == "":
UpperCAmelCase_ = BloomConfig()
else:
UpperCAmelCase_ = BloomConfig.from_json_file(snake_case__ )
if shard_model:
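        # Sharded path: write one shard per Megatron layer file plus a JSON index mapping weights to shards.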
UpperCAmelCase_ = os.listdir(snake_case__ )
        UpperCAmelCase_ = sorted(filter(lambda s: s.startswith('''layer''' ) and "model_00" in s , snake_case__ ) )
UpperCAmelCase_ = {'''weight_map''': {}, '''metadata''': {}}
UpperCAmelCase_ = 0
UpperCAmelCase_ = None
UpperCAmelCase_ = BloomConfig()
for j, file in enumerate(snake_case__ ):
print('''Processing file: {}'''.format(snake_case__ ) )
UpperCAmelCase_ = None
for i in range(snake_case__ ):
# load all TP files
UpperCAmelCase_ = file.replace('''model_00''' , f'model_0{i}' )
UpperCAmelCase_ = torch.load(os.path.join(snake_case__ , snake_case__ ) , map_location='''cpu''' )
# Rename keys in the transformers names
UpperCAmelCase_ = list(temp.keys() )
for key in keys:
UpperCAmelCase_ = temp.pop(snake_case__ )
if tensors is None:
UpperCAmelCase_ = temp
else:
for key in tensors.keys():
if any(key.endswith(snake_case__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
UpperCAmelCase_ = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                        # We concatenate these weights across TP ranks
UpperCAmelCase_ = torch.cat([tensors[key], temp[key]] , dim=snake_case__ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(snake_case__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
UpperCAmelCase_ = tensors[key] / pretraining_tp
torch.save(
snake_case__ , os.path.join(
snake_case__ , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(snake_case__ ) ).zfill(5 ) ) , ) , )
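        # Record each tensor's shard file in the index and accumulate the total checkpoint size.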
for key in tensors.keys():
UpperCAmelCase_ = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
UpperCAmelCase_ = '''pytorch_model_{}-of-{}.bin'''.format(
str(j + 1 ).zfill(5 ) , str(len(snake_case__ ) ).zfill(5 ) )
UpperCAmelCase_ = BloomConfig()
UpperCAmelCase_ = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
UpperCAmelCase_ = total_size
with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(snake_case__ , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f:
UpperCAmelCase_ = json.dumps(snake_case__ , indent=2 , sort_keys=snake_case__ ) + '''\n'''
f.write(snake_case__ )
else:
UpperCAmelCase_ = BloomModel(snake_case__ )
UpperCAmelCase_ = os.listdir(snake_case__ )
        UpperCAmelCase_ = sorted(filter(lambda s: s.startswith('''layer''' ) and "model_00" in s , snake_case__ ) )
UpperCAmelCase_ = None
for i, file in enumerate(snake_case__ ):
UpperCAmelCase_ = None
for i in range(snake_case__ ):
# load all TP files
UpperCAmelCase_ = file.replace('''model_00''' , f'model_0{i}' )
UpperCAmelCase_ = torch.load(os.path.join(snake_case__ , snake_case__ ) , map_location='''cpu''' )
# Rename keys in the transformers names
UpperCAmelCase_ = list(temp.keys() )
for key in keys:
UpperCAmelCase_ = temp.pop(snake_case__ )
if tensors is None:
UpperCAmelCase_ = temp
else:
for key in tensors.keys():
                    # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(snake_case__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
UpperCAmelCase_ = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                        # We concatenate these weights across TP ranks
UpperCAmelCase_ = torch.cat([tensors[key], temp[key]] , dim=snake_case__ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(snake_case__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
UpperCAmelCase_ = tensors[key] / pretraining_tp
UpperCAmelCase_ = model.load_state_dict(snake_case__ , strict=snake_case__ )
assert not other_keys.unexpected_keys, f'The keys {other_keys.unexpected_keys} are unexpected'
if missing_keys is None:
UpperCAmelCase_ = set(other_keys.missing_keys )
else:
UpperCAmelCase_ = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f'The keys {missing_keys} are missing'
# Save pytorch-model
os.makedirs(snake_case__ , exist_ok=snake_case__ )
UpperCAmelCase_ = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
UpperCAmelCase_ = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(f'Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}' )
if config.torch_dtype is not None:
UpperCAmelCase_ = model.to(config.torch_dtype )
torch.save(model.state_dict() , snake_case__ )
print(f'Save configuration file to {pytorch_config_dump_path}' )
with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
_lowerCamelCase = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 365 |
import base64


def base85_encode(string: str) -> bytes:
    """Encode a UTF-8 string as Base85 bytes."""
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    """Decode Base85-encoded bytes back to a UTF-8 string."""
    return base64.b85decode(a85encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
| 177 | 0 |
'''simple docstring'''
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
lowercase_ = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
lowercase_ = get_tests_dir("""fixtures/vocab.json""")
lowercase_ = get_tests_dir("""fixtures""")
class a_ ( unittest.TestCase ):
'''simple docstring'''
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def snake_case_( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = 0
def snake_case_( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(A , A )
def snake_case_( self ) -> List[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
_SCREAMING_SNAKE_CASE = WavaVecaConfig()
_SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
# save in new folder
model_config.save_pretrained(A )
processor.save_pretrained(A )
_SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained(A )
self.assertIsInstance(A , A )
def snake_case_( self ) -> Any:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(A , os.path.join(A , A ) )
copyfile(A , os.path.join(A , """vocab.json""" ) )
_SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained(A )
self.assertIsInstance(A , A )
def snake_case_( self ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdirname:
_SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor()
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
_SCREAMING_SNAKE_CASE = WavaVecaProcessor(A , A )
# save in new folder
processor.save_pretrained(A )
# drop `processor_class` in tokenizer
with open(os.path.join(A , A ) , """r""" ) as f:
_SCREAMING_SNAKE_CASE = json.load(A )
config_dict.pop("""processor_class""" )
with open(os.path.join(A , A ) , """w""" ) as f:
f.write(json.dumps(A ) )
_SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained(A )
self.assertIsInstance(A , A )
def snake_case_( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdirname:
_SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor()
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
_SCREAMING_SNAKE_CASE = WavaVecaProcessor(A , A )
# save in new folder
processor.save_pretrained(A )
# drop `processor_class` in feature extractor
with open(os.path.join(A , A ) , """r""" ) as f:
_SCREAMING_SNAKE_CASE = json.load(A )
config_dict.pop("""processor_class""" )
with open(os.path.join(A , A ) , """w""" ) as f:
f.write(json.dumps(A ) )
_SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained(A )
self.assertIsInstance(A , A )
def snake_case_( self ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdirname:
_SCREAMING_SNAKE_CASE = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" )
model_config.save_pretrained(A )
# copy relevant files
copyfile(A , os.path.join(A , """vocab.json""" ) )
            # create empty sample processor
with open(os.path.join(A , A ) , """w""" ) as f:
f.write("""{}""" )
_SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained(A )
self.assertIsInstance(A , A )
def snake_case_( self ) -> Optional[Any]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(A ):
_SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(A ):
_SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=A )
_SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=A )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
_SCREAMING_SNAKE_CASE = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
_SCREAMING_SNAKE_CASE = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
_SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=A , use_fast=A )
_SCREAMING_SNAKE_CASE = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def snake_case_( self ) -> List[Any]:
try:
AutoConfig.register("""custom""" , A )
AutoFeatureExtractor.register(A , A )
AutoTokenizer.register(A , slow_tokenizer_class=A )
AutoProcessor.register(A , A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(A ):
AutoProcessor.register(A , A )
# Now that the config is registered, it can be used as any other config with the auto-API
_SCREAMING_SNAKE_CASE = CustomFeatureExtractor.from_pretrained(A )
with tempfile.TemporaryDirectory() as tmp_dir:
_SCREAMING_SNAKE_CASE = os.path.join(A , """vocab.txt""" )
with open(A , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
_SCREAMING_SNAKE_CASE = CustomTokenizer(A )
_SCREAMING_SNAKE_CASE = CustomProcessor(A , A )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(A )
_SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained(A )
self.assertIsInstance(A , A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def snake_case_( self ) -> str:
class a_ ( snake_case_ ):
'''simple docstring'''
UpperCamelCase = False
class a_ ( snake_case_ ):
'''simple docstring'''
UpperCamelCase = False
class a_ ( snake_case_ ):
'''simple docstring'''
UpperCamelCase = '''AutoFeatureExtractor'''
UpperCamelCase = '''AutoTokenizer'''
UpperCamelCase = False
try:
AutoConfig.register("""custom""" , A )
AutoFeatureExtractor.register(A , A )
AutoTokenizer.register(A , slow_tokenizer_class=A )
AutoProcessor.register(A , A )
# If remote code is not set, the default is to use local classes.
_SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
_SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=A )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
_SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=A )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def snake_case_( self ) -> str:
_SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" )
def snake_case_( self ) -> str:
_SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" )
@is_staging_test
class a_ ( unittest.TestCase ):
'''simple docstring'''
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def snake_case_( cls ) -> List[str]:
_SCREAMING_SNAKE_CASE = TOKEN
HfFolder.save_token(A )
@classmethod
def snake_case_( cls ) -> int:
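        # Remove any repos these tests created on the Hub.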
try:
delete_repo(token=cls._token , repo_id="""test-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" )
except HTTPError:
pass
def snake_case_( self ) -> str:
_SCREAMING_SNAKE_CASE = WavaVecaProcessor.from_pretrained(A )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(A , """test-processor""" ) , push_to_hub=A , use_auth_token=self._token )
_SCREAMING_SNAKE_CASE = WavaVecaProcessor.from_pretrained(f'{USER}/test-processor' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(A , getattr(new_processor.feature_extractor , A ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def snake_case_( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = WavaVecaProcessor.from_pretrained(A )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(A , """test-processor-org""" ) , push_to_hub=A , use_auth_token=self._token , organization="""valid_org""" , )
_SCREAMING_SNAKE_CASE = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(A , getattr(new_processor.feature_extractor , A ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def snake_case_( self ) -> Optional[Any]:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
_SCREAMING_SNAKE_CASE = CustomFeatureExtractor.from_pretrained(A )
with tempfile.TemporaryDirectory() as tmp_dir:
_SCREAMING_SNAKE_CASE = os.path.join(A , """vocab.txt""" )
with open(A , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
_SCREAMING_SNAKE_CASE = CustomTokenizer(A )
_SCREAMING_SNAKE_CASE = CustomProcessor(A , A )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f'{USER}/test-dynamic-processor' , token=self._token )
_SCREAMING_SNAKE_CASE = Repository(A , clone_from=f'{USER}/test-dynamic-processor' , token=self._token )
processor.save_pretrained(A )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""",
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(A , """tokenizer_config.json""" ) ) as f:
_SCREAMING_SNAKE_CASE = json.load(A )
self.assertDictEqual(
tokenizer_config["""auto_map"""] , {
"""AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None],
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(A , """custom_feature_extraction.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(A , """custom_tokenization.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(A , """custom_processing.py""" ) ) )
repo.push_to_hub()
_SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained(f'{USER}/test-dynamic-processor' , trust_remote_code=A )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
| 58 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class GLPNImageProcessor(BaseImageProcessor):
'''simple docstring'''
    model_input_names = ["pixel_values"]
def __init__( self, __magic_name__ = True, __magic_name__ = 32, __magic_name__=PILImageResampling.BILINEAR, __magic_name__ = True, **__magic_name__, ) -> None:
"""simple docstring"""
UpperCamelCase__ : int = do_resize
UpperCamelCase__ : Tuple = do_rescale
UpperCamelCase__ : Any = size_divisor
UpperCamelCase__ : List[Any] = resample
super().__init__(**__magic_name__ )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__, __magic_name__ = None, **__magic_name__ ) -> np.ndarray:
"""simple docstring"""
UpperCamelCase__ ,UpperCamelCase__ : List[Any] = get_image_size(__magic_name__ )
# Rounds the height and width down to the closest multiple of size_divisor
UpperCamelCase__ : Any = height // size_divisor * size_divisor
UpperCamelCase__ : Optional[int] = width // size_divisor * size_divisor
UpperCamelCase__ : str = resize(__magic_name__, (new_h, new_w), resample=__magic_name__, data_format=__magic_name__, **__magic_name__ )
return image
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__ = None, **__magic_name__ ) -> np.ndarray:
"""simple docstring"""
return rescale(image=__magic_name__, scale=__magic_name__, data_format=__magic_name__, **__magic_name__ )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__ = None, __magic_name__ = None, __magic_name__=None, __magic_name__ = None, __magic_name__ = None, __magic_name__ = ChannelDimension.FIRST, **__magic_name__, ) -> BatchFeature:
"""simple docstring"""
UpperCamelCase__ : str = do_resize if do_resize is not None else self.do_resize
UpperCamelCase__ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase__ : Any = size_divisor if size_divisor is not None else self.size_divisor
UpperCamelCase__ : str = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
UpperCamelCase__ : Union[str, Any] = make_list_of_images(__magic_name__ )
if not valid_images(__magic_name__ ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
UpperCamelCase__ : Optional[Any] = [to_numpy_array(__magic_name__ ) for img in images]
if do_resize:
UpperCamelCase__ : str = [self.resize(__magic_name__, size_divisor=__magic_name__, resample=__magic_name__ ) for image in images]
if do_rescale:
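            # Scale pixel values from [0, 255] into [0, 1].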
UpperCamelCase__ : List[Any] = [self.rescale(__magic_name__, scale=1 / 255 ) for image in images]
UpperCamelCase__ : Optional[Any] = [to_channel_dimension_format(__magic_name__, __magic_name__ ) for image in images]
UpperCamelCase__ : Tuple = {'''pixel_values''': images}
return BatchFeature(data=__magic_name__, tensor_type=__magic_name__ )
| 201 | 0 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch")
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)
        return device

    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size

    @property
    def place_model_on_device(self):
        # SageMaker model parallelism handles device placement itself.
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
| 351 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"
def __init__( self : List[Any] , lowercase_ : Optional[int]=32 , lowercase_ : Optional[int]=7_68 , lowercase_ : List[str]=12 , lowercase_ : Union[str, Any]=12 , lowercase_ : Union[str, Any]=30_72 , lowercase_ : List[Any]="gelu" , lowercase_ : int=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : str=0.1 , lowercase_ : Union[str, Any]=0.0 , lowercase_ : List[str]=0.0 , lowercase_ : List[Any]=0.1 , lowercase_ : Any=0.1 , lowercase_ : Optional[Any]=0.02 , lowercase_ : int=1E-5 , lowercase_ : int="group" , lowercase_ : Tuple="gelu" , lowercase_ : Dict=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , lowercase_ : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , lowercase_ : List[str]=(10, 3, 3, 3, 3, 2, 2) , lowercase_ : int=False , lowercase_ : List[Any]=1_28 , lowercase_ : Optional[Any]=16 , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=True , lowercase_ : Union[str, Any]=0.05 , lowercase_ : Optional[Any]=10 , lowercase_ : Any=2 , lowercase_ : int=0.0 , lowercase_ : Union[str, Any]=10 , lowercase_ : Optional[Any]=0 , lowercase_ : List[str]=3_20 , lowercase_ : Dict=2 , lowercase_ : Optional[int]=0.1 , lowercase_ : Tuple=1_00 , lowercase_ : Dict=2_56 , lowercase_ : Optional[Any]=2_56 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : List[Any]="mean" , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=False , lowercase_ : Dict=2_56 , lowercase_ : Union[str, Any]=80 , lowercase_ : int=0 , lowercase_ : Union[str, Any]=1 , lowercase_ : Dict=2 , lowercase_ : Optional[int]=0.5 , **lowercase_ : Union[str, Any] , ) -> Any:
super().__init__(**lowercase_ , pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ )
lowercase__ : List[str] = hidden_size
lowercase__ : Any = feat_extract_norm
lowercase__ : Optional[Any] = feat_extract_activation
lowercase__ : Dict = list(lowercase_ )
lowercase__ : Union[str, Any] = list(lowercase_ )
lowercase__ : List[str] = list(lowercase_ )
lowercase__ : List[str] = conv_bias
lowercase__ : Any = num_conv_pos_embeddings
lowercase__ : Dict = num_conv_pos_embedding_groups
lowercase__ : int = len(self.conv_dim )
lowercase__ : str = num_hidden_layers
lowercase__ : Any = intermediate_size
lowercase__ : Optional[int] = hidden_act
lowercase__ : int = num_attention_heads
lowercase__ : Union[str, Any] = hidden_dropout
lowercase__ : Any = attention_dropout
lowercase__ : Union[str, Any] = activation_dropout
lowercase__ : Any = feat_proj_dropout
lowercase__ : str = final_dropout
lowercase__ : int = layerdrop
lowercase__ : Optional[int] = layer_norm_eps
lowercase__ : List[Any] = initializer_range
lowercase__ : Any = num_ctc_classes
lowercase__ : int = vocab_size
lowercase__ : str = do_stable_layer_norm
lowercase__ : Any = use_weighted_layer_sum
lowercase__ : Dict = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase__ : List[Any] = apply_spec_augment
lowercase__ : Dict = mask_time_prob
lowercase__ : Tuple = mask_time_length
lowercase__ : str = mask_time_min_masks
lowercase__ : List[Any] = mask_feature_prob
lowercase__ : int = mask_feature_length
lowercase__ : Optional[int] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowercase__ : Optional[int] = num_codevectors_per_group
lowercase__ : List[str] = num_codevector_groups
lowercase__ : Dict = contrastive_logits_temperature
lowercase__ : Tuple = feat_quantizer_dropout
lowercase__ : Any = num_negatives
lowercase__ : Dict = codevector_dim
lowercase__ : Tuple = proj_codevector_dim
lowercase__ : List[str] = diversity_loss_weight
# ctc loss
lowercase__ : Tuple = ctc_loss_reduction
lowercase__ : Dict = ctc_zero_infinity
# pretraining loss
lowercase__ : Optional[Any] = replace_prob
@property
def __UpperCamelCase ( self : Dict ) -> Tuple:
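        # Product of all conv strides, i.e. the overall temporal downsampling factor of the feature extractor.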
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 333 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"
    def setUp(self):
super().setUp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        self.tokenizers_list = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "UNwant\u00E9d,running"
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)
        sequence = "UNwant\u00E9d,running"
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) ,['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) ,['''hello'''] )
    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) ,['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) ,['''h\u00E9llo'''] )
    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) ,['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) ,['''hello'''] )
    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) ,['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) ,['''hello'''] )
    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) ,['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) ,['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) ,['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) ,['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize('''''' ) ,[] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) ,['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) ,['''[UNK]''', '''runn''', '''##ing'''] )
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
    def test_is_control(self):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
assert encoded_sentence == [1_0_1] + text + [1_0_2]
assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''Allen'''),
((2_1, 2_3), '''##NL'''),
((2_3, 2_4), '''##P'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''allen'''),
((2_1, 2_3), '''##nl'''),
((2_3, 2_4), '''##p'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] ,tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] ,tokens['''offset_mapping'''] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)
                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
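# A self-contained sketch of the greedy longest-match-first WordPiece step the tests
# above exercise, built on transformers' public WordpieceTokenizer and a toy vocabulary.
if __name__ == "__main__":
    toy_vocab = {tok: i for i, tok in enumerate(["[UNK]", "un", "##want", "##ed", "runn", "##ing"])}
    wordpiece = WordpieceTokenizer(vocab=toy_vocab, unk_token="[UNK]")
    assert wordpiece.tokenize("unwanted running") == ["un", "##want", "##ed", "runn", "##ing"]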
| 106 |
def interpolation_search(sorted_collection, item):
    """Searches for `item` in an ascending `sorted_collection`; returns its index or None."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Pure recursive variant; the caller supplies the initial bounds
    (0 and len(sorted_collection) - 1 for a full search)."""
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)
def __assert_sorted(collection):
    """Raises ValueError if `collection` is not ascending sorted."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
if __name__ == "__main__":
import sys
lowercase_ = 0
if debug == 1:
lowercase_ = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("Sequence must be ascending sorted to apply interpolation search")
lowercase_ = 67
lowercase_ = interpolation_search(collection, target)
if result is not None:
print(f"""{target} found at positions: {result}""")
else:
print("Not found")
| 7 | 0 |
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """Merges two sorted linked lists into a new sorted linked list."""
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
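# merge_lists above rebuilds the result by re-sorting, i.e. O((n + m) log(n + m)); a
# hedged linear-time alternative (hypothetical helper, not in the original module)
# exploits the fact that both inputs already iterate in ascending order.
import heapq


def merge_lists_linear(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> list[int]:
    # heapq.merge lazily interleaves the two sorted streams in O(n + m)
    return list(heapq.merge(sll_one, sll_two))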
| 365 | '''simple docstring'''
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa


class CursorInfo(ctypes.Structure):
    # _fields is a specific attr expected by ctypes
    _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager that hides the terminal cursor and restores it on exit."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
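# Hedged usage sketch: wrap a long-running section so the cursor disappears while it
# runs and is restored even if an exception escapes.
if __name__ == "__main__":
    import time

    with hide():
        time.sleep(0.5)  # stand-in for real work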
| 21 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCAmelCase__ = logging.get_logger(__name__)
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = ['pixel_values']
def __init__( self : Optional[int] ,lowercase__ : bool = True ,lowercase__ : Optional[Dict[str, int]] = None ,lowercase__ : PILImageResampling = PILImageResampling.BICUBIC ,lowercase__ : bool = True ,lowercase__ : bool = True ,lowercase__ : Union[int, float] = 1 / 2_5_5 ,lowercase__ : Dict[str, int] = None ,lowercase__ : bool = True ,lowercase__ : Optional[Union[float, List[float]]] = None ,lowercase__ : Optional[Union[float, List[float]]] = None ,**lowercase__ : Tuple ,):
super().__init__(**lowercase__ )
__lowercase = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
__lowercase = get_size_dict(lowercase__ )
__lowercase = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
__lowercase = get_size_dict(lowercase__ ,default_to_square=lowercase__ ,param_name='''crop_size''' )
__lowercase = do_resize
__lowercase = do_rescale
__lowercase = do_normalize
__lowercase = do_center_crop
__lowercase = crop_size
__lowercase = size
__lowercase = resample
__lowercase = rescale_factor
__lowercase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__lowercase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : np.ndarray ,lowercase__ : Dict[str, int] ,lowercase__ : PILImageResampling = PILImageResampling.BILINEAR ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : Optional[Any] ,):
__lowercase = get_size_dict(lowercase__ )
if "shortest_edge" in size:
__lowercase = get_resize_output_image_size(lowercase__ ,size=size['''shortest_edge'''] ,default_to_square=lowercase__ )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
__lowercase = (size['''height'''], size['''width'''])
else:
raise ValueError(F"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}" )
return resize(lowercase__ ,size=lowercase__ ,resample=lowercase__ ,data_format=lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : np.ndarray ,lowercase__ : Dict[str, int] ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : int ,):
__lowercase = get_size_dict(lowercase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(lowercase__ ,size=(size['''height'''], size['''width''']) ,data_format=lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : np.ndarray ,lowercase__ : float ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : Any ):
return rescale(lowercase__ ,scale=lowercase__ ,data_format=lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : np.ndarray ,lowercase__ : Union[float, List[float]] ,lowercase__ : Union[float, List[float]] ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : List[str] ,):
return normalize(lowercase__ ,mean=lowercase__ ,std=lowercase__ ,data_format=lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : ImageInput ,lowercase__ : Optional[bool] = None ,lowercase__ : Dict[str, int] = None ,lowercase__ : PILImageResampling = None ,lowercase__ : bool = None ,lowercase__ : int = None ,lowercase__ : Optional[bool] = None ,lowercase__ : Optional[float] = None ,lowercase__ : Optional[bool] = None ,lowercase__ : Optional[Union[float, List[float]]] = None ,lowercase__ : Optional[Union[float, List[float]]] = None ,lowercase__ : Optional[Union[str, TensorType]] = None ,lowercase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST ,**lowercase__ : Tuple ,):
__lowercase = do_resize if do_resize is not None else self.do_resize
__lowercase = do_rescale if do_rescale is not None else self.do_rescale
__lowercase = do_normalize if do_normalize is not None else self.do_normalize
__lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowercase = crop_size if crop_size is not None else self.crop_size
__lowercase = get_size_dict(lowercase__ ,param_name='''crop_size''' ,default_to_square=lowercase__ )
__lowercase = resample if resample is not None else self.resample
__lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase = image_mean if image_mean is not None else self.image_mean
__lowercase = image_std if image_std is not None else self.image_std
__lowercase = size if size is not None else self.size
__lowercase = get_size_dict(lowercase__ )
if not is_batched(lowercase__ ):
__lowercase = [images]
if not valid_images(lowercase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(lowercase__ ) for image in images]
if do_resize:
__lowercase = [self.resize(image=lowercase__ ,size=lowercase__ ,resample=lowercase__ ) for image in images]
if do_center_crop:
__lowercase = [self.center_crop(image=lowercase__ ,size=lowercase__ ) for image in images]
if do_rescale:
__lowercase = [self.rescale(image=lowercase__ ,scale=lowercase__ ) for image in images]
if do_normalize:
__lowercase = [self.normalize(image=lowercase__ ,mean=lowercase__ ,std=lowercase__ ) for image in images]
__lowercase = [to_channel_dimension_format(lowercase__ ,lowercase__ ) for image in images]
__lowercase = {'''pixel_values''': images}
return BatchFeature(data=lowercase__ ,tensor_type=lowercase__ )
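# A quick sketch of the size-dict resolution used throughout the class above: the
# get_size_dict helper (already imported at the top of this module) accepts both an
# explicit height/width mapping and a bare edge length.
if __name__ == "__main__":
    assert get_size_dict({"height": 224, "width": 224}) == {"height": 224, "width": 224}
    assert get_size_dict(256, default_to_square=False) == {"shortest_edge": 256}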
| 104 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
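# Hedged usage sketch: datasets selects this formatter when a dataset's format is set
# to "torch"; columns then come back as torch tensors instead of Python lists.
if __name__ == "__main__":
    from datasets import Dataset

    ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]})
    ds.set_format("torch")
    print(ds[0]["x"])  # tensor([1, 2])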
| 307 | 0 |
"""simple docstring"""
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id(self):
        token = "[PAD]"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1012)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)
    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, -9, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'[UNK]',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'[UNK]',
'.',
] , )
@cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [35389, 6672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
a : Optional[int] = {'input_ids': [[1_1_0_7_3, 8_2_7_8_3, 1_8, 2_6, 8_2_7_8_3, 5_4_9, 5_1_5_4_0, 2_4_8, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 2_1_5_1_8_6, 1_3_2_5, 1_4_7, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 5_6_3_7_0, 5_3, 1_2_2_0_2_0, 2_0, 1_6_4_7_7, 2_7, 8_7_3_5_5, 4_5_4_8, 2_0, 4_7_2_8, 7_8_3_9_2, 1_7, 1_5_9_9_6_9, 1_8, 2_6, 2_4_4_9_1, 6_2_9, 1_5, 5_3_8, 2_2_7_0_4, 5_4_3_9, 1_5, 2_7_8_8, 2_4_4_9_1, 9_8_8_5, 1_5, 4_3_5_3_4, 6_0_5, 1_5, 8_1_4, 1_8_4_0_3, 3_3_2_0_0, 2_9, 1_5, 4_3_5_3_4, 2_4_4_5_8, 1_2_4_1_0, 1_1_1, 2_4_9_6_6, 8_3_6_6_9, 9_6_3_7, 1_4_4_0_6_8, 2_6, 8_5_0, 2_2_3_4_6, 2_7, 1_4_7, 2_4_9_6_6, 8_3_6_6_9, 8_3_4_9_0, 2_6, 3_9_1_1_3, 7_3_5, 2_7, 6_8_9, 6_5_6, 2_8_0_0, 1_3_3_9, 4_6_0_0, 5_3, 1_2_2_0_2_0, 1_1_5_7_8_5, 3_4, 8_1_6, 1_3_3_9, 4_6_8_8_7, 1_8, 1_4_7, 5_3_9_0_5, 1_9_5_1, 4_2_2_3_8, 4_1_1_7_0, 1_7_7_3_2, 8_3_4, 4_3_6, 1_5, 2_7_5_2_3, 9_8_7_3_3, 2_1_7, 1_4_7, 5_5_4_2, 4_9_8_1, 9_3_0, 1_7_3_4_7, 1_6, 2], [2_0_0_9_1, 6_2_9, 9_4, 8_2_7_8_6, 5_8, 4_9_0, 2_0, 1_5_2_8, 8_4, 5_3_9_0_5, 3_4_4, 8_0_5_9_2, 1_1_0_1_2_8, 1_8_8_2_2, 5_2_6_7, 1_3_0_6, 6_2, 1_5_2_5_3_7, 3_0_8, 7_9_9_7, 4_0_1, 1_2_4_4_2_7, 5_4_9, 3_5_4_4_2, 2_2_5, 1_0_9, 1_5_0_5_5, 2_5_7_4_8, 1_4_7, 7_1_1_9, 4_3_7_1_2, 3_4, 7_6_7, 1_3_5_3_6_6, 1_8, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_9_2, 6_3_7_8_4, 1_1_9_4_6_6, 1_7, 1_4_7_8_0_8, 8_8_2_1_4, 1_8, 6_5_6, 8_1, 3_2, 3_2_9_6, 1_0_2_8_0, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='microsoft/xprophetnet-large-wiki100-cased' , revision='1acad1643ddd54a44df6a1b797ada8373685d90e' , )
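# A small sketch of the fairseq-offset arithmetic the assertions above depend on: raw
# SentencePiece ids are shifted by a constant so the low ids stay reserved for special
# tokens. The offset value below is an assumption for illustration, not read from the
# real tokenizer.
if __name__ == "__main__":
    fairseq_offset = 12  # hypothetical value
    spm_ids = [285, 46, 10, 170, 382]
    hf_ids = [i + fairseq_offset for i in spm_ids]
    assert [i - fairseq_offset for i in hf_ids] == spm_ids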
| 366 |
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)
OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
_UpperCamelCase : Tuple = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
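# Worked example of compute_f1 above: gold "cat sat here" and prediction "cat sat down"
# share two of three tokens each, so precision = recall = 2/3 and F1 = 2/3.
assert abs(compute_f1("cat sat here", "cat sat down") - 2 / 3) < 1e-9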
| 186 | 0 |
"""simple docstring"""
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Return the maximum of nums[left:right + 1] by divide and conquer."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
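    # Hedged usage sketch: the window is the inclusive [left, right] index range, so a
    # whole-array query passes 0 and len(nums) - 1.
    nums = [3, 7, 1, 9, 4]
    assert find_max(nums, 0, len(nums) - 1) == 9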
| 91 |
"""simple docstring"""
def solution() -> str:
    """Returns the last ten digits of the series 1^1 + 2^2 + ... + 1000^1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
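# The loop above carries roughly 3000-digit integers; a hedged variant (hypothetical
# name, not in the original file) keeps only the last ten digits at every step via
# three-argument pow and agrees with solution() on the final answer.
def solution_mod() -> str:
    mod = 10**10
    return str(sum(pow(i, i, mod) for i in range(1, 1001)) % mod).zfill(10)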
| 255 | 0 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
'''simple docstring'''
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
# DPR tok
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
# BART tok
        vocab_tokens = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab_tokens, range(len(vocab_tokens))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
_A: Union[str, Any] = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
_A: Union[str, Any] = self.get_dummy_dataset()
_A: Union[str, Any] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
_A: Union[str, Any] = dataset
_A: List[Any] = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def __magic_name__ ( self : str , lowerCAmelCase_ : Tuple ):
"""simple docstring"""
_A: str = self.get_dummy_dataset()
_A: Optional[int] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
_A: List[Any] = os.path.join(self.tmpdirname , '''dataset''' )
_A: Dict = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
_A: Any = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
_A: Tuple = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __UpperCAmelCase ) , )
return retriever
def __magic_name__ ( self : str ):
"""simple docstring"""
_A: int = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
_A: str = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
_A: Any = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
_A: Optional[int] = {sample["""id"""]: [sample["""text"""], sample["""title"""]] for sample in dataset}
pickle.dump(__UpperCAmelCase , open(__UpperCAmelCase , '''wb''' ) )
_A: Union[str, Any] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
_A: str = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def __magic_name__ ( self : int ):
"""simple docstring"""
_A: List[Any] = 1
_A: Optional[Any] = self.get_dummy_canonical_hf_index_retriever()
_A: List[str] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_A: int = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __magic_name__ ( self : int ):
"""simple docstring"""
_A: Any = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
_A: List[Any] = self.get_dummy_dataset()
retriever.save_pretrained(__UpperCAmelCase )
_A: List[str] = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
_A: Union[str, Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_A: Dict = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
_A: List[str] = 1
_A: Optional[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
_A: str = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_A: Optional[int] = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __magic_name__ ( self : int ):
"""simple docstring"""
_A: str = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__UpperCAmelCase )
_A: Any = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
_A: Dict = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_A: Dict = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
_A: Tuple = 1
_A: Any = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
_A: str = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_A: Any = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
_A: Optional[int] = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__UpperCAmelCase )
_A: int = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
_A: List[str] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_A: Optional[int] = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consists of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in the dictionary
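# A minimal sketch (not part of the original test file) of why the all-ones query
# lands on doc "1" and the all-minus-ones query on doc "0" in the assertions above:
# retrieval scores are inner products between the query hidden state and the doc
# embeddings, so the sign of the query flips which embedding scores highest. The
# embedding values below are illustrative assumptions, not the dummy index contents.
#
#     import numpy as np
#     doc_embeds = np.array([[0.9, 0.9], [1.0, 1.0]])  # docs "0" and "1"
#     query = np.ones(2)
#     assert (doc_embeds @ query).argmax() == 1         # doc "1" wins for +1s
#     assert (doc_embeds @ -query).argmax() == 0        # doc "0" wins for -1s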
| 366 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
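# A rough sketch (an assumption for illustration, not the actual `_LazyModule`
# implementation) of the lazy-import pattern used above: the module replaces itself
# in `sys.modules` with an object whose `__getattr__` imports the right submodule on
# first attribute access, so heavy dependencies are only paid for when used.
#
#     import importlib, types
#
#     class LazyModuleSketch(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._class_to_module = {
#                 cls: mod for mod, classes in import_structure.items() for cls in classes
#             }
#         def __getattr__(self, name):
#             module = importlib.import_module("." + self._class_to_module[name], self.__name__)
#             return getattr(module, name)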
| 301 | 0 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact: the API URL redirects to the real download location."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    """Count the occurrences of each error."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test method path like `tests/models/albert/...`."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None
    return test
def reduce_by_model(logs, error_filter=None):
    """Get the error occurrences for each model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
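# Example invocation (illustrative; the script name and run id below are assumptions):
#
#     python get_ci_error_statistics.py \
#         --workflow_run_id 123456789 \
#         --output_dir ci_reports \
#         --token $GITHUB_TOKEN
#
# This downloads every artifact of the run, tallies the most common test errors, and
# writes GitHub-flavored markdown tables to `reduced_by_error.txt` and
# `reduced_by_model.txt`.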
| 330 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
a_ = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
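# A hedged usage sketch (not part of this __init__; checkpoint names are
# illustrative): wiring two pretrained encoders/decoders together with the class
# exported above.
#
#     from transformers import EncoderDecoderModel
#     model = EncoderDecoderModel.from_encoder_decoder_pretrained(
#         "bert-base-uncased", "bert-base-uncased"
#     )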
| 330 | 1 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
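# Worked numbers for the two repartition tests above (comments only): each `id` row
# serializes to an 8-byte Arrow int64, so 100 rows at max_shard_size=16 gives
# ceil(800 / 16) = 50 partitions, while max_shard_size=1 is clamped so the partition
# count never exceeds the 100 rows.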
| 160 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

# Folder paths are intentionally left empty and must be filled in before running.
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
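# Worked example for the flip math above (comments only): YOLO labels store a
# normalized box center, so a horizontal flip maps x_center = 0.25 to 1 - 0.25 = 0.75
# while width and height stay unchanged; a vertical flip does the same to y_center.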
def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅")
| 160 | 1 |
"""simple docstring"""
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int | None:
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
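# Worked example (comments only): run(2) returns [14, 15], the first pair of
# consecutive integers with two distinct prime factors each (14 = 2*7, 15 = 3*5),
# and solution(3) returns 644 (644 = 2^2*7*23, 645 = 3*5*43, 646 = 2*17*19).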
| 33 |
'''simple docstring'''
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """A string can be rearranged into a palindrome iff at most one character has an odd count."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0

    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
| 258 | 0 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 351 |
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight
def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)
def prim(graph: list, root: Vertex) -> list:
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a
def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass
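# A hedged usage sketch for the two Prim variants above (edge weights chosen for
# illustration, not taken from the original file):
#
#     G = [Vertex(n) for n in range(5)]
#     connect(G, 1, 2, 15)
#     connect(G, 1, 3, 12)
#     connect(G, 2, 4, 13)
#     connect(G, 2, 5, 5)
#     connect(G, 3, 4, 6)
#     mst_edges = prim(G, G[0])              # list of (child, parent) pairs
#     heap_edges = list(prim_heap(G, G[0]))  # same MST, heap-based variant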
if __name__ == "__main__":
import doctest
doctest.testmod()
| 101 | 0 |
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 56 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None
logger = logging.get_logger(__name__)
_UpperCAmelCase : Dict ={"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/nllb-large-en-ro""": 1024,
"""facebook/nllb-200-distilled-600M""": 1024,
}
# fmt: off
_UpperCAmelCase : Any =["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        """Reset the special tokens to the target lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
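# A hedged usage sketch of the language-code machinery above (typical NLLB
# translation flow; the checkpoint name is illustrative):
#
#     from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn"
#     )
#     model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")
#     inputs = tokenizer("Hello world", return_tensors="pt")
#     out = model.generate(**inputs, forced_bos_token_id=tokenizer.lang_code_to_id["fra_Latn"])
#     print(tokenizer.batch_decode(out, skip_special_tokens=True))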
return (out_vocab_file,) | 262 | 0 |
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ = " " ):
'''simple docstring'''
snake_case_ = []
snake_case_ = 0
for index, char in enumerate(UpperCamelCase__ ):
if char == separator:
split_words.append(string[last_index:index] )
snake_case_ = index + 1
elif index + 1 == len(UpperCamelCase__ ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
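# Quick example (comments only): split("apple#banana#cherry", "#") returns
# ["apple", "banana", "cherry"]. Note a trailing separator yields no empty tail
# element, unlike str.split.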
| 200 |
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sorts `sequence[start..end]` in place via the deliberately inefficient slowsort."""
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
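# Quick example (comments only):
#     data = [3, 1, 2]
#     slowsort(data)
#     assert data == [1, 2, 3]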
| 200 | 1 |
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
headers = {
"""User-Agent""": """Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"""
""" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"""
}
def __lowerCAmelCase ( lowercase : str = "dhaka" , lowercase : int = 5 ) -> int:
"""simple docstring"""
snake_case : List[Any] = min(lowercase , 50 ) # Prevent abuse!
snake_case : Optional[Any] = {
"q": query,
"tbm": "isch",
"hl": "en",
"ijn": "0",
}
snake_case : str = requests.get("https://www.google.com/search" , params=lowercase , headers=lowercase )
snake_case : List[str] = BeautifulSoup(html.text , "html.parser" )
snake_case : List[Any] = "".join(
re.findall(R"AF_initDataCallback\(([^<]+)\);" , str(soup.select("script" ) ) ) )
snake_case : Optional[Any] = json.dumps(lowercase )
snake_case : str = json.loads(lowercase )
snake_case : List[str] = re.findall(
R"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\"," , lowercase , )
if not matched_google_image_data:
return 0
snake_case : List[str] = re.sub(
R"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]" , "" , str(lowercase ) , )
snake_case : Dict = re.findall(
R"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]" , lowercase , )
for index, fixed_full_res_image in enumerate(lowercase ):
if index >= max_images:
return index
snake_case : List[str] = bytes(lowercase , "ascii" ).decode(
"unicode-escape" )
snake_case : Dict = bytes(lowercase , "ascii" ).decode(
"unicode-escape" )
snake_case : int = urllib.request.build_opener()
snake_case : int = [
(
"User-Agent",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
)
]
urllib.request.install_opener(lowercase )
snake_case : Optional[int] = F'query_{query.replace(" " , "_" )}'
if not os.path.exists(lowercase ):
os.makedirs(lowercase )
urllib.request.urlretrieve( # noqa: S310
lowercase , F'{path_name}/original_size_img_{index}.jpg' )
return index
if __name__ == "__main__":
    try:
        image_count = download_images_from_google_query(sys.argv[1])
        print(f"{image_count} images were downloaded to disk.")
    except IndexError:
        print("Please provide a search term.")
        raise
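# Example invocation (illustrative; the script filename is an assumption):
#     python download_images_from_google_query.py cats
# downloads up to 5 full-resolution results for "cats" into ./query_cats/. Google can
# change its result markup at any time, so the regexes above are best-effort.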
| 203 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
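# Note on the shape assertions above: this is the x4 upscaler, so a 128x128 input
# decodes to a 512x512 image; the expected slices spot-check a few pixels per scheduler.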
| 203 | 1 |
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--repo_path''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()

    config_parameters_to_change = {
'''image_size''': '''sample_size''',
'''num_res_blocks''': '''layers_per_block''',
'''block_channels''': '''block_out_channels''',
'''down_blocks''': '''down_block_types''',
'''up_blocks''': '''up_block_types''',
'''downscale_freq_shift''': '''freq_shift''',
'''resnet_num_groups''': '''norm_num_groups''',
'''resnet_act_fn''': '''act_fn''',
'''resnet_eps''': '''norm_eps''',
'''num_head_channels''': '''attention_head_dim''',
}
    key_parameters_to_change = {
'''time_steps''': '''time_proj''',
'''mid''': '''mid_block''',
'''downsample_blocks''': '''down_blocks''',
'''upsample_blocks''': '''up_blocks''',
}
    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)
    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)
    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]
    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
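# Example of the renaming performed above (comments only): a checkpoint key like
# "downsample_blocks.0.resnets.0.conv1.weight" becomes
# "down_blocks.0.resnets.0.conv1.weight" (only the first dotted component is
# rewritten), and the config key "num_res_blocks" becomes "layers_per_block".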
| 15 |
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number (fibonacci(2) == 1)."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with `n` digits."""
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1_000) -> int:
    """Project Euler 25: index of the first Fibonacci term with `n` digits."""
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
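# Worked example (comments only): fibonacci(12) == 144 is the first term with three
# digits, so fibonacci_digits_index(3) == 12; solution(1000) returns 4782.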
| 15 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@require_ftfy
def UpperCAmelCase__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase_ : int = self.tokenizer_class.from_pretrained(__magic_name__ , **__magic_name__ )
UpperCAmelCase_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(__magic_name__ , **__magic_name__ )
UpperCAmelCase_ : Optional[int] = '''A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'''
UpperCAmelCase_ : Tuple = tokenizer_s.tokenize(__magic_name__ )
UpperCAmelCase_ : List[str] = tokenizer_r.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
UpperCAmelCase_ : Dict = '''xa\u0303y''' + ''' ''' + '''x\xe3y'''
UpperCAmelCase_ : Tuple = tokenizer_s.tokenize(__magic_name__ )
UpperCAmelCase_ : Tuple = tokenizer_r.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
# Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
'''\u0009''', # (horizontal tab, '\t')
'''\u000B''', # (vertical tab)
'''\u000C''', # (form feed)
'''\u0020''', # (space, ' ')
                    '''\u200E''', # (left-to-right mark)
'''\u200F''', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
UpperCAmelCase_ : List[str] = tokenizer_s.tokenize(__magic_name__ )
UpperCAmelCase_ : str = tokenizer_r.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
# Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
'''\u000A''', # (line feed, '\n')
'''\r\n''', # (carriage return and line feed, '\r\n')
'''\u000D''', # (carriage return, '\r')
'''\r''', # (carriage return, '\r')
'''\u000D''', # (carriage return, '\r')
'''\u2028''', # (line separator)
'''\u2029''', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
UpperCAmelCase_ : Union[str, Any] = tokenizer_s.tokenize(__magic_name__ )
UpperCAmelCase_ : str = tokenizer_r.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
def UpperCAmelCase__ ( self : int ) -> int:
"""simple docstring"""
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                text_of_1_token = '''hello'''  # `hello` is a token in the vocabulary of `pretrained_name`
                text = F"""{text_of_1_token} {text_of_1_token}"""
UpperCAmelCase_ : str = self.rust_tokenizer_class.from_pretrained(
__magic_name__ , use_fast=__magic_name__ , )
UpperCAmelCase_ : Optional[Any] = tokenizer_r(__magic_name__ , return_offsets_mapping=__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__magic_name__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__magic_name__ ) + 1, len(__magic_name__ ) + 1 + len(__magic_name__ )) , )
UpperCAmelCase_ : int = F""" {text}"""
UpperCAmelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
__magic_name__ , use_fast=__magic_name__ , )
UpperCAmelCase_ : int = tokenizer_r(__magic_name__ , return_offsets_mapping=__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__magic_name__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__magic_name__ ) + 1, 1 + len(__magic_name__ ) + 1 + len(__magic_name__ )) , )
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
# Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
with self.assertRaises(__magic_name__ ) as context:
self.rust_tokenizer_class.from_pretrained('''robot-test/old-clip-tokenizer''' )
self.assertTrue(
context.exception.args[0].startswith(
'''The `backend_tokenizer` provided does not match the expected format.''' ) )
@require_ftfy
def UpperCAmelCase__ ( self : Any ) -> Dict:
"""simple docstring"""
super().test_tokenization_python_rust_equals()
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
# CLIP always lower cases letters
pass
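# Note (my addition): CLIP's BPE vocabulary above marks word endings with `</w>`,
# which is why '''lower newer''' tokenizes to ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
# rather than into whole words.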
| 125 |
'''simple docstring'''
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path: str) -> dict:
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict: dict) -> dict:
    converted_dict = {}
    CONVERSION_MAPPING = {
        'token_embedder': 'embeddings',
        'encoder_norm': 'layernorm',
        'kernel': 'weight',
        '.out': '.output',
        'scale': 'weight',
        'embedders_0.pos_embedding': 'row_embedder.weight',
        'embedders_1.pos_embedding': 'column_embedder.weight',
    }
    DECODER_CONVERSION_MAPPING = {
        'query': 'attention.query',
        'key': 'attention.key',
        'value': 'attention.value',
        'output.dense': 'output',
        'encoder_decoder_attention.o': 'encoder_decoder_attention.attention.o',
        'pre_self_attention_layer_norm': 'self_attention.layer_norm',
        'pre_cross_attention_layer_norm': 'encoder_decoder_attention.layer_norm',
        'mlp.': 'mlp.DenseReluDense.',
        'pre_mlp_layer_norm': 'mlp.layer_norm',
        'self_attention.o': 'self_attention.attention.o',
        'decoder.embeddings.embedding': 'decoder.embed_tokens.weight',
        'decoder.relpos_bias.rel_embedding': 'decoder.layer.0.self_attention.attention.relative_attention_bias.weight',
        'decoder.decoder_norm.weight': 'decoder.final_layer_norm.weight',
        'decoder.logits_dense.weight': 'decoder.lm_head.weight',
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = '.'.join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r'layers_(\d+)', r'layer.\1', new_key)
                new_key = new_key.replace('encoder', 'encoder.encoder')
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r'layers_(\d+)', r'layer.\1', new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])
    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained('ybelkada/test-pix2struct-tokenizer')
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        # assumption: the two bare assignments in the original set these processor attributes
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print('Model saved in {}'.format(pytorch_dump_folder_path))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--use_large", action="store_true", help="Use large model.")
parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
args = parser.parse_args()
convert_pix2struct_original_pytorch_checkpoint_to_hf(
    args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
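# Illustrative invocation (my addition; the script filename and paths are placeholders):
#   python convert_pix2struct_checkpoint.py --t5x_checkpoint_path /path/to/t5x_ckpt \
#       --pytorch_dump_folder_path ./pix2struct-base [--use_large]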
| 125 | 1 |
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(donor_conc: float, acceptor_conc: float, intrinsic_conc: float) -> float:
'''simple docstring'''
if donor_conc <= 0:
raise ValueError('''Donor concentration should be positive''' )
elif acceptor_conc <= 0:
raise ValueError('''Acceptor concentration should be positive''' )
elif intrinsic_conc <= 0:
raise ValueError('''Intrinsic concentration should be positive''' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'''Donor concentration should be greater than intrinsic concentration''' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'''Acceptor concentration should be greater than intrinsic concentration''' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
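# Worked example (my addition): for a silicon pn-junction with
# donor_conc = acceptor_conc = 1e17 cm^-3 and intrinsic_conc = 1.5e10 cm^-3,
# builtin_voltage(1e17, 1e17, 1.5e10) evaluates to roughly 0.81 V at T = 300 K.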
| 352 |
'''simple docstring'''
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(text):
        return tok(text, return_tensors='''pt''').input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + ''' ''' + src
        cand_tgt = new_tgt + ''' ''' + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # can't fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / F'''{split}.source''', data_dir / F'''{split}.target'''
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(F'''packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.''')
        Path(save_path / F'''{split}.source''').open('''w''').write('''\n'''.join(packed_src))
        Path(save_path / F'''{split}.target''').open('''w''').write('''\n'''.join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / F'''{split}.source''', data_dir / F'''{split}.target'''
        shutil.copyfile(src_path, save_path / F'''{split}.source''')
        shutil.copyfile(tgt_path, save_path / F'''{split}.target''')


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument('''--tok_name''', type=str, help='''like facebook/bart-large-cnn,t5-base, etc.''')
    parser.add_argument('''--max_seq_len''', type=int, default=128)
    parser.add_argument('''--data_dir''', type=str)
    parser.add_argument('''--save_path''', type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
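# Illustrative CLI usage (my addition; paths and model name are placeholders):
#   python pack_dataset.py --tok_name facebook/bart-large-cnn --max_seq_len 128 \
#       --data_dir ./cnn_dm --save_path ./cnn_dm_packed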
| 98 | 0 |
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    dataset_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(dataset_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(ds_filter)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
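# Note (my addition): "a " * 20 and "a " * 30 share the same token set, so their
# MinHash/Jaccard similarity exceeds the 0.85 threshold and they land in one duplicate
# cluster; "b " * 7 is untouched, which is why 2 rows remain after deduplication.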
| 144 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_reformer'] = ['ReformerTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_reformer_fast'] = ['ReformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_reformer'] = [
'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ReformerAttention',
'ReformerForMaskedLM',
'ReformerForQuestionAnswering',
'ReformerForSequenceClassification',
'ReformerLayer',
'ReformerModel',
'ReformerModelWithLMHead',
'ReformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
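# Note (my addition): swapping sys.modules[__name__] for a _LazyModule means e.g.
# `from transformers.models.reformer import ReformerModel` only pulls in the
# torch-heavy modeling code at first attribute access.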
| 144 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
'''simple docstring'''
    def __init__(
        self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30,
        max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None,
        do_normalize=True, image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711], do_convert_rgb=True,
    ):
        size = size if size is not None else {'''height''': 224, '''width''': 224}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
def lowerCAmelCase__ ( self : int ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : Tuple = ChineseCLIPImageProcessor if is_vision_available() else None
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Optional[Any] = ChineseCLIPImageProcessingTester(self , do_center_crop=__A )
@property
def lowerCAmelCase__ ( self : Tuple ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , """do_resize""" ) )
self.assertTrue(hasattr(__A , """size""" ) )
self.assertTrue(hasattr(__A , """do_center_crop""" ) )
self.assertTrue(hasattr(__A , """center_crop""" ) )
self.assertTrue(hasattr(__A , """do_normalize""" ) )
self.assertTrue(hasattr(__A , """image_mean""" ) )
self.assertTrue(hasattr(__A , """image_std""" ) )
self.assertTrue(hasattr(__A , """do_convert_rgb""" ) )
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 224, """width""": 224} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
UpperCamelCase_: int = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def lowerCAmelCase__ ( self : Union[str, Any] ):
pass
def lowerCAmelCase__ ( self : Tuple ):
# Initialize image_processing
UpperCamelCase_: Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase_: Optional[int] = self.image_processor_tester.prepare_inputs(equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
UpperCamelCase_: str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
UpperCamelCase_: Any = image_processing(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCAmelCase__ ( self : Optional[Any] ):
# Initialize image_processing
UpperCamelCase_: List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase_: List[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
UpperCamelCase_: List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
UpperCamelCase_: Dict = image_processing(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCAmelCase__ ( self : Any ):
# Initialize image_processing
UpperCamelCase_: str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase_: Optional[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
UpperCamelCase_: str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
UpperCamelCase_: List[str] = image_processing(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : Optional[Any] = ChineseCLIPImageProcessor if is_vision_available() else None
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: str = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=__A )
UpperCamelCase_: Dict = 3
@property
def lowerCAmelCase__ ( self : List[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , """do_resize""" ) )
self.assertTrue(hasattr(__A , """size""" ) )
self.assertTrue(hasattr(__A , """do_center_crop""" ) )
self.assertTrue(hasattr(__A , """center_crop""" ) )
self.assertTrue(hasattr(__A , """do_normalize""" ) )
self.assertTrue(hasattr(__A , """image_mean""" ) )
self.assertTrue(hasattr(__A , """image_std""" ) )
self.assertTrue(hasattr(__A , """do_convert_rgb""" ) )
def lowerCAmelCase__ ( self : List[Any] ):
pass
def lowerCAmelCase__ ( self : Tuple ):
# Initialize image_processing
UpperCamelCase_: Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase_: Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
UpperCamelCase_: Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
UpperCamelCase_: Tuple = image_processing(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
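# Note (my addition): the four-channel variant above relies on do_convert_rgb, so the
# processor is still expected to emit 3-channel pixel values even for RGBA PIL inputs,
# matching the expected_encoded_image_num_channels = 3 set in its setUp.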
| 361 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
'''simple docstring'''
    def __init__(
        self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30,
        max_resolution=400, do_resize=True, size=None, do_normalize=True,
    ):
        size = size if size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
def lowerCAmelCase__ ( self : str ):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8866_4436_3403_3203, 0.6618_8293_6954_4983, 0.3891_7464_0178_6804],
[-0.6042_5591_4688_1104, -0.0_2295_0088_6052_8469, 0.5423_7973_6900_3296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : Optional[int] = ImageGPTImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case_ , """clusters""" ) )
self.assertTrue(hasattr(snake_case_ , """do_resize""" ) )
self.assertTrue(hasattr(snake_case_ , """size""" ) )
self.assertTrue(hasattr(snake_case_ , """do_normalize""" ) )
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
UpperCamelCase_: Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: int = self.image_processing_class(**self.image_processor_dict )
UpperCamelCase_: Optional[int] = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(snake_case_ , obj[key] ) )
else:
self.assertEqual(obj[key] , snake_case_ )
def lowerCAmelCase__ ( self : List[Any] ):
UpperCamelCase_: Dict = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase_: int = os.path.join(snake_case_ , """image_processor.json""" )
image_processor_first.to_json_file(snake_case_ )
UpperCamelCase_: Any = self.image_processing_class.from_json_file(snake_case_ ).to_dict()
UpperCamelCase_: str = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(snake_case_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , snake_case_ )
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: Optional[Any] = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(snake_case_ )
UpperCamelCase_: Optional[int] = self.image_processing_class.from_pretrained(snake_case_ ).to_dict()
UpperCamelCase_: Union[str, Any] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(snake_case_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , snake_case_ )
@unittest.skip("""ImageGPT requires clusters at initialization""" )
def lowerCAmelCase__ ( self : List[Any] ):
pass
def prepare_images():
    dataset = load_dataset("""hf-internal-testing/fixtures_image_utils""", split="""test""")
    image1 = Image.open(dataset[4]["""file"""])
    image2 = Image.open(dataset[5]["""file"""])
    images = [image1, image2]
    return images
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
'''simple docstring'''
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""")
        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="""pt""")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))
        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="""pt""")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))
        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
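# Note (my addition): ImageGPT has no patch embeddings; its image processor snaps each
# normalized pixel to the nearest color cluster, so a 32x32 input becomes 1024 integer
# token ids, matching the (1, 1024) shape asserted above.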
| 223 | 0 |
"""simple docstring"""
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
"""169M""": 12,
"""430M""": 24,
"""1B5""": 24,
"""3B""": 32,
"""7B""": 32,
"""14B""": 40,
}
HIDDEN_SIZE_MAPPING = {
"""169M""": 768,
"""430M""": 1_024,
"""1B5""": 2_048,
"""3B""": 2_560,
"""7B""": 4_096,
"""14B""": 5_120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith('emb.'):
            name = name.replace('emb.', 'embeddings.')
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith('blocks.0.ln0'):
            name = name.replace('blocks.0.ln0', 'blocks.0.pre_ln')
        # att -> attention
        name = re.sub(r'blocks\.(\d+)\.att', r'blocks.\1.attention', name)
        # ffn -> feed_forward
        name = re.sub(r'blocks\.(\d+)\.ffn', r'blocks.\1.feed_forward', name)
        # time_mix_k -> time_mix_key
        if name.endswith('.time_mix_k'):
            name = name.replace('.time_mix_k', '.time_mix_key')
        # time_mix_v -> time_mix_value
        if name.endswith('.time_mix_v'):
            name = name.replace('.time_mix_v', '.time_mix_value')
        # time_mix_r -> time_mix_receptance
        if name.endswith('.time_mix_r'):
            name = name.replace('.time_mix_r', '.time_mix_receptance')

        if name != "head.weight":
            name = 'rwkv.' + name

        state_dict[name] = weight
    return state_dict
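# Examples of the renaming above (my addition):
#   "blocks.3.att.key.weight" -> "rwkv.blocks.3.attention.key.weight"
#   "emb.weight"              -> "rwkv.embeddings.weight"
#   "head.weight"             -> "head.weight"  (left unprefixed)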
def convert_rwkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print('No `--tokenizer_file` provided, we will use the default tokenizer.')
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b')
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError('Could not infer the size, please provide it with the `--size` argument.')
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location='cpu')
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, 'w', encoding='utf-8') as f:
            content = json.dumps(index, indent=2, sort_keys=True) + '\n'
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        'Cleaning up shards. This may error with an OOM error, if this is the case don\'t worry you still have converted the model.')
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError('Please provide a `model_name` to push the model to the Hub.')
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size='2GB')
        tokenizer.push_to_hub(model_name)
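# Illustrative invocation (my addition; the repo id and checkpoint file are placeholders):
#   python convert_rwkv_checkpoint_to_hf.py --repo_id BlinkDL/rwkv-4-pile-169m \
#       --checkpoint_file RWKV-4-Pile-169M-20220807-8023.pth --output_dir ./rwkv-4-169m-hf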
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
args = parser.parse_args()
convert_rwkv_checkpoint_to_hf_format(
    args.repo_id,
    args.checkpoint_file,
    args.output_dir,
    size=args.size,
    tokenizer_file=args.tokenizer_file,
    push_to_hub=args.push_to_hub,
    model_name=args.model_name,
)
| 86 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("""."""):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
for name, value in fairseq_dict.items():
snake_case__ : List[Any] = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == """group""" , )
snake_case__ : Union[str, Any] = True
elif name.split(""".""" )[0] == "proj":
snake_case__ : Tuple = fairseq_model.proj
snake_case__ : int = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
snake_case__ : Optional[Any] = True
if "*" in mapped_key:
snake_case__ : Optional[int] = name.split(_lowerCAmelCase )[0].split(""".""" )[-2]
snake_case__ : Tuple = mapped_key.replace("""*""" , _lowerCAmelCase )
if "weight_g" in name:
snake_case__ : str = """weight_g"""
elif "weight_v" in name:
snake_case__ : int = """weight_v"""
elif "bias" in name:
snake_case__ : Dict = """bias"""
elif "weight" in name:
snake_case__ : Union[str, Any] = """weight"""
else:
snake_case__ : Union[str, Any] = None
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f"Unused weights: {unused_weights}" )
return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("""conv_layers.""")[-1]
    items = name.split(""".""")
    layer_id = int(items[0])
    type_id = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
snake_case__ : int = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
snake_case__ : str = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
snake_case__ : Union[str, Any] = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
snake_case__ : int = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_lowerCAmelCase )
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, """r""", encoding="""utf-8""") as f:
        lines = f.readlines()
        words = [line.split(""" """)[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        """<s>""": 0,
        """<pad>""": 1,
        """</s>""": 2,
        """<unk>""": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wavaveca_checkpoint(checkpoint_path, pytorch_dump_folder_path, dict_path, encoder_config_path, decoder_config_path, vocab_size, num_decoder_layers):
snake_case__ : Optional[Any] = WavaVecaConfig.from_pretrained(_lowerCAmelCase )
snake_case__ : Optional[Any] = SpeechaTextaConfig.from_pretrained(
_lowerCAmelCase , vocab_size=_lowerCAmelCase , decoder_layers=_lowerCAmelCase , do_stable_layer_norm=_lowerCAmelCase )
snake_case__ : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , )
snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
snake_case__ : Tuple = model[0].eval()
# set weights for wav2vec2 encoder
snake_case__ : Optional[Any] = WavaVecaModel(_lowerCAmelCase )
snake_case__ : Dict = recursively_load_weights_wavaveca(model.encoder , _lowerCAmelCase )
snake_case__ : Optional[Any] = SpeechaTextaForCausalLM(_lowerCAmelCase )
snake_case__ , snake_case__ : Tuple = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_lowerCAmelCase )
# set output linear layer
unexpected_keys.remove("""embed_out""" )
snake_case__ : Tuple = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}" )
logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
snake_case__ : List[Any] = SpeechEncoderDecoderModel(encoder=_lowerCAmelCase , decoder=_lowerCAmelCase )
snake_case__ : Tuple = False
# add projection layer
snake_case__ : Union[str, Any] = nn.Parameter(projection_layer.weight )
snake_case__ : int = nn.Parameter(projection_layer.bias )
snake_case__ : Tuple = create_vocab_dict(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , """vocab.json""" ) , """w""" ) as fp:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
snake_case__ : Tuple = SpeechaTextaTokenizer(os.path.join(_lowerCAmelCase , """vocab.json""" ) )
tokenizer.save_pretrained(_lowerCAmelCase )
snake_case__ : Optional[Any] = hf_wavavec.config.to_dict()
snake_case__ : Tuple = tokenizer.pad_token_id
snake_case__ : Optional[Any] = tokenizer.bos_token_id
snake_case__ : int = tokenizer.eos_token_id
snake_case__ : str = """speech_to_text_2"""
snake_case__ : List[Any] = """wav2vec2"""
snake_case__ : List[str] = SpeechEncoderDecoderConfig.from_dict(_lowerCAmelCase )
hf_wavavec.save_pretrained(_lowerCAmelCase )
feature_extractor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=1_0224, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
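# Note (my addition): this script stitches a Wav2Vec2 acoustic encoder onto a
# Speech2Text2 decoder inside a SpeechEncoderDecoderModel; `--vocab_size` and
# `--num_decoder_layers` size the freshly configured decoder side.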
| 35 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("""Matrices are not 2x2""")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("""Odd matrices are not supported!""")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]

    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("""\n""".join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            """Unable to multiply these matrices, please check the dimensions.\n"""
            F"Matrix A: {matrix1}\n"
            F"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
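# Note (my addition): Strassen replaces 8 recursive block products with 7, giving
# O(n^log2(7)) ≈ O(n^2.807) multiplications; the zero-padding above rounds both
# operands up to a power-of-two size so each recursion can split the blocks evenly.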
| 76 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
A: int = logging.get_logger(__name__)
A: Any = {"vocab_file": "vocab.txt"}
A: Optional[int] = {
"vocab_file": {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
}
}
A: Optional[int] = {
"YituTech/conv-bert-base": 5_1_2,
"YituTech/conv-bert-medium-small": 5_1_2,
"YituTech/conv-bert-small": 5_1_2,
}
A: int = {
"YituTech/conv-bert-base": {"do_lower_case": True},
"YituTech/conv-bert-medium-small": {"do_lower_case": True},
"YituTech/conv-bert-small": {"do_lower_case": True},
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__lowerCAmelCase : List[Any] = VOCAB_FILES_NAMES
__lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : List[str] = PRETRAINED_INIT_CONFIGURATION
__lowerCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : int = ConvBertTokenizer
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="[UNK]" , _SCREAMING_SNAKE_CASE="[SEP]" , _SCREAMING_SNAKE_CASE="[PAD]" , _SCREAMING_SNAKE_CASE="[CLS]" , _SCREAMING_SNAKE_CASE="[MASK]" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , tokenize_chinese_chars=_SCREAMING_SNAKE_CASE , strip_accents=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
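    # Example (illustrative): for a pair of sequences A = [5, 6] and B = [7] the
    # layout is [CLS] 5 6 [SEP] 7 [SEP], so this returns [0, 0, 0, 0, 1, 1] --
    # the first segment (including [CLS] and its [SEP]) is typed 0, the second
    # segment (including its closing [SEP]) is typed 1.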
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 76 | 1 |
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b
def solution(n: int = 1000) -> int:
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
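    # Example (illustrative): solution(3) == 12, because F(12) = 144 is the
    # first Fibonacci term with three digits (1, 1, 2, 3, 5, 8, ..., 89, 144).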
| 300 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """simple docstring"""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """simple docstring"""

    order = 1
@register_to_config
    def __init__(self, num_train_timesteps: int = 2_000, snr: float = 0.15, sigma_min: float = 0.01, sigma_max: float = 1348.0, sampling_eps: float = 1e-5, correct_steps: int = 1, ):
        '''simple docstring'''
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None):
        '''simple docstring'''
        return sample
    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None):
        '''simple docstring'''
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)
    def set_sigmas(self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None):
        '''simple docstring'''
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
    def get_adjacent_sigma(self, timesteps, t):
        '''simple docstring'''
        return torch.where(
            timesteps == 0, torch.zeros_like(t.to(timesteps.device)), self.discrete_sigmas[timesteps - 1].to(timesteps.device), )
    def step_pred(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, generator: Optional[torch.Generator] = None, return_dict: bool = True, ):
        '''simple docstring'''
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps ) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device )
        sigma = self.discrete_sigmas[timesteps].to(sample.device )
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep ).to(sample.device )
        drift = torch.zeros_like(sample )
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape ) < len(sample.shape ):
            diffusion = diffusion.unsqueeze(-1 )
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean )
    def step_correct(self, model_output: torch.FloatTensor, sample: torch.FloatTensor, generator: Optional[torch.Generator] = None, return_dict: bool = True, ):
        '''simple docstring'''
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator ).to(sample.device )
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1 ), dim=-1 ).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1 ), dim=-1 ).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0] ).to(sample.device )
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape ) < len(sample.shape ):
            step_size = step_size.unsqueeze(-1 )
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
    def add_noise(self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.FloatTensor, ):
        '''simple docstring'''
        timesteps = timesteps.to(original_samples.device )
        sigmas = self.discrete_sigmas.to(original_samples.device )[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples ) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples
def __len__( self :Union[str, Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
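
# Minimal usage sketch (illustrative only; `score` stands in for a hypothetical
# score-model prediction, the other names follow the class above):
#
#   scheduler = ScoreSdeVeScheduler()
#   scheduler.set_sigmas(num_inference_steps=1000)
#   for t in scheduler.timesteps:
#       for _ in range(scheduler.config.correct_steps):
#           sample = scheduler.step_correct(score, sample).prev_sample
#       sample = scheduler.step_pred(score, t, sample).prev_sample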
| 300 | 1 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    return "".join(sorted(word))
def anagram(my_word: str) -> list[str]:
    return word_by_signature[signature(my_word)]
data = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''')
word_list = sorted({word.strip().lower() for word in data.splitlines()})
word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open('''anagrams.txt''', '''w''') as file:
        file.write('''all_anagrams = \n ''')
        file.write(pprint.pformat(all_anagrams))
| 288 |
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tfa_weights_in_bert(model, tf_checkpoint_path, config):
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f'Converting TensorFlow checkpoint from {tf_path}' )
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split('/' )
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f'Skipping non-model layer {full_name}' )
            continue
        if "optimizer" in full_name:
            logger.info(f'Skipping optimization layer {full_name}' )
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith('layer_with_weights' ):
                depth += 1
            else:
                break
        layer_depth.append(depth )
        # read data
        array = tf.train.load_variable(tf_path, full_name )
        names.append('/'.join(name ) )
        arrays.append(array )
    logger.info(f'Read a total of {len(arrays ):,} layers' )
    # Sanity check
    if len(set(layer_depth ) ) != 1:
        raise ValueError(f'Found layer names with different depths (layer depth {list(set(layer_depth ) )})' )
    layer_depth = list(set(layer_depth ) )[0]
    if layer_depth != 1:
        raise ValueError(
            'The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'
            ' heads.' )
# convert layers
logger.info('Converting weights...' )
    for full_name, array in zip(names, arrays ):
        name = full_name.split('/' )
        pointer = model
        trace = []
        for i, m_name in enumerate(name ):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith('layer_with_weights' ):
                layer_num = int(m_name.split('-' )[-1] )
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(['embeddings', 'LayerNorm'] )
                    pointer = getattr(pointer, 'embeddings' )
                    pointer = getattr(pointer, 'LayerNorm' )
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(['encoder', 'layer', str(layer_num - 4 )] )
                    pointer = getattr(pointer, 'encoder' )
                    pointer = getattr(pointer, 'layer' )
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(['pooler', 'dense'] )
                    pointer = getattr(pointer, 'pooler' )
                    pointer = getattr(pointer, 'dense' )
            elif m_name == "embeddings":
                trace.append('embeddings' )
                pointer = getattr(pointer, 'embeddings' )
                if layer_num == 0:
                    trace.append('word_embeddings' )
                    pointer = getattr(pointer, 'word_embeddings' )
                elif layer_num == 1:
                    trace.append('position_embeddings' )
                    pointer = getattr(pointer, 'position_embeddings' )
                elif layer_num == 2:
                    trace.append('token_type_embeddings' )
                    pointer = getattr(pointer, 'token_type_embeddings' )
                else:
                    raise ValueError(f'Unknown embedding layer with name {full_name}' )
                trace.append('weight' )
                pointer = getattr(pointer, 'weight' )
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(['attention', 'self'] )
                pointer = getattr(pointer, 'attention' )
                pointer = getattr(pointer, 'self' )
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(['attention', 'output', 'LayerNorm'] )
                pointer = getattr(pointer, 'attention' )
                pointer = getattr(pointer, 'output' )
                pointer = getattr(pointer, 'LayerNorm' )
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(['attention', 'output', 'dense'] )
                pointer = getattr(pointer, 'attention' )
                pointer = getattr(pointer, 'output' )
                pointer = getattr(pointer, 'dense' )
            elif m_name == "_output_dense":
                # output dense
                trace.extend(['output', 'dense'] )
                pointer = getattr(pointer, 'output' )
                pointer = getattr(pointer, 'dense' )
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.extend(['output', 'LayerNorm'] )
                pointer = getattr(pointer, 'output' )
                pointer = getattr(pointer, 'LayerNorm' )
            elif m_name == "_key_dense":
                # attention key
                trace.append('key' )
                pointer = getattr(pointer, 'key' )
            elif m_name == "_query_dense":
                # attention query
                trace.append('query' )
                pointer = getattr(pointer, 'query' )
            elif m_name == "_value_dense":
                # attention value
                trace.append('value' )
                pointer = getattr(pointer, 'value' )
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(['intermediate', 'dense'] )
                pointer = getattr(pointer, 'intermediate' )
                pointer = getattr(pointer, 'dense' )
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append('bias' )
                pointer = getattr(pointer, 'bias' )
            elif m_name in ["kernel", "gamma"]:
                trace.append('weight' )
                pointer = getattr(pointer, 'weight' )
            else:
                logger.warning(f'Ignored {m_name}' )
        # for certain layers reshape is necessary
        trace = '.'.join(trace )
        if re.match(R'(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)', trace ) or re.match(
            R'(\S+)\.attention\.output\.dense\.weight', trace ):
            array = array.reshape(pointer.data.shape )
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array )
        else:
            raise ValueError(
                f'Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'
                f' {array.shape}' )
        logger.info(f'Successfully set variable {full_name} to PyTorch layer {trace}' )
    return model
def convert_tfa_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    # Instantiate model
    logger.info(f'Loading model based on config from {config_path}...' )
    config = BertConfig.from_json_file(config_path )
    model = BertModel(config )
    # Load weights from checkpoint
    logger.info(f'Loading weights from checkpoint {tf_checkpoint_path}...' )
    load_tfa_weights_in_bert(model, tf_checkpoint_path, config )
    # Save pytorch-model
    logger.info(f'Saving PyTorch model to {pytorch_dump_path}...' )
    torch.save(model.state_dict(), pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow 2.x checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model (must include filename).''',
)
    args = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 288 | 1 |
def method_1(boundary: list, steps: float) -> float:
    # "extended trapezoidal rule":
    # int(f) ~= h/2 * (f(x_0) + 2*f(x_1) + ... + 2*f(x_{n-1}) + f(x_n))
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y
def make_points(a: float, b: float, h: float):
    x = a + h
    while x <= (b - h):  # <= so the last interior point b - h is not skipped
        yield x
        x = x + h
def f(x: float) -> float:  # enter your function here
    y = (x - 0) * (x - 0)
    return y
def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}" )
if __name__ == "__main__":
main()
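    # Quick sanity check (illustrative arithmetic): with f(x) = x^2 on [0, 1]
    # and h = 0.1, the composite trapezoidal sum is 0.05 * (0 + 2 * 2.85 + 1)
    # = 0.335 against the exact value 1/3; the O(h^2) error shrinks as
    # `steps` grows.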
| 11 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    '''simple docstring'''
    def __init__( self , *args , **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs)
| 11 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_convbert_fast'] = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_convbert'] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_convbert'] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 187 |
'''simple docstring'''
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """simple docstring"""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    complete = 0
    increment_time = 0
    minm = 999_999_999
    short = 0
    check = False
    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True
        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1
        minm = remaining_time[short]
        if minm == 0:
            minm = 999_999_999
        if remaining_time[short] == 0:
            complete += 1
            check = False
            # Find finish time of current process
            finish_time = increment_time + 1
            # Calculate waiting time: (finish - arrival) is the turnaround time
            turnaround = finish_time - arrival_time[short]
            waiting_time[short] = turnaround - burst_time[short]
            if waiting_time[short] < 0:
                waiting_time[short] = 0
        # Increment time
        increment_time += 1
    return waiting_time
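
# Illustrative trace (worked by hand): with arrival_time = [0, 1, 2, 3] and
# burst_time = [8, 4, 9, 5], SRTF runs P1 for [0, 1), P2 for [1, 5),
# P4 for [5, 10), P1 for [10, 17) and P3 for [17, 26), so
# calculate_waitingtime(...) returns [9, 0, 15, 2] (average 6.5).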
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """simple docstring"""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """simple docstring"""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f'Average waiting time = {total_waiting_time / no_of_processes:.5f}' )
    print("""Average turn around time =""" , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print('Enter how many process you want to analyze')
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))
    for i in range(no_of_processes):
        print('Enter the arrival time and burst time for process:--' + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)
    calculate_average_times(waiting_time, turn_around_time, no_of_processes)
    fcfs = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'Process',
'BurstTime',
'ArrivalTime',
'WaitingTime',
'TurnAroundTime',
],
)
# Printing the dataFrame
pd.set_option('display.max_rows', fcfs.shape[0] + 1)
print(fcfs) | 187 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ : List[Any] = logging.get_logger(__name__)
lowercase__ : Any = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = """xlm-roberta-xl"""
    def __init__( self, vocab_size=25_08_80, hidden_size=25_60, num_hidden_layers=36, num_attention_heads=32, intermediate_size=1_02_40, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_14, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1E-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaXLOnnxConfig(OnnxConfig):
    '''simple docstring'''
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 187 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)
OPTS = None
def parse_args():
    parser = argparse.ArgumentParser('''Official evaluation script for SQuAD version 2.0.''' )
    parser.add_argument('''data_file''' , metavar='''data.json''' , help='''Input data JSON file.''' )
    parser.add_argument('''pred_file''' , metavar='''pred.json''' , help='''Model predictions.''' )
    parser.add_argument(
        '''--out-file''' , '''-o''' , metavar='''eval.json''' , help='''Write accuracy metrics to file (default is stdout).''' )
    parser.add_argument(
        '''--na-prob-file''' , '''-n''' , metavar='''na_prob.json''' , help='''Model estimates of probability of no answer.''' )
    parser.add_argument(
        '''--na-prob-thresh''' , '''-t''' , type=float , default=1.0 , help='''Predict "" if no-answer probability exceeds this (default = 1.0).''' , )
    parser.add_argument(
        '''--out-image-dir''' , '''-p''' , metavar='''out_images''' , default=None , help='''Save precision-recall curves to directory.''' )
    parser.add_argument('''--verbose''' , '''-v''' , action='''store_true''' )
    if len(sys.argv ) == 1:
        parser.print_help()
        sys.exit(1 )
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa['''id''']] = bool(qa['''answers''']['''text'''] )
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""
    def remove_articles(text):
        return ARTICLES_REGEX.sub(''' ''' , text )
    def white_space_fix(text):
        return " ".join(text.split() )
    def remove_punc(text):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s ).split()
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )
def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold )
    pred_toks = get_tokens(a_pred )
    common = collections.Counter(gold_toks ) & collections.Counter(pred_toks )
    num_same = sum(common.values() )
    if len(gold_toks ) == 0 or len(pred_toks ) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks )
    recall = 1.0 * num_same / len(gold_toks )
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
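
# Worked example (illustrative): with a_gold = "the cat sat" and
# a_pred = "cat sat on", normalization drops the article "the", giving gold
# tokens [cat, sat] and prediction tokens [cat, sat, on]; the overlap is 2,
# so precision = 2/3, recall = 1.0, and compute_f1(...) returns 0.8.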
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa['''id''']
                gold_answers = [t for t in qa['''answers''']['''text'''] if normalize_answer(t )]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = ['''''']
                if qid not in preds:
                    print(f'''Missing prediction for {qid}''' )
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a , a_pred ) for a in gold_answers )
                f1_scores[qid] = max(compute_f1(a , a_pred ) for a in gold_answers )
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid] )
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores )
        return collections.OrderedDict(
            [
                ('''exact''', 100.0 * sum(exact_scores.values() ) / total),
                ('''f1''', 100.0 * sum(f1_scores.values() ) / total),
                ('''total''', total),
            ] )
    else:
        total = len(qid_list )
        return collections.OrderedDict(
            [
                ('''exact''', 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
                ('''f1''', 100.0 * sum(f1_scores[k] for k in qid_list ) / total),
                ('''total''', total),
            ] )
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f'{prefix}_{k}'] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls , precisions , color='''b''' , alpha=0.2 , where='''post''' )
    plt.fill_between(recalls , precisions , step='''post''' , alpha=0.2 , color='''b''' )
    plt.xlabel('''Recall''' )
    plt.ylabel('''Precision''' )
    plt.xlim([0.0, 1.05] )
    plt.ylim([0.0, 1.05] )
    plt.title(title )
    plt.savefig(out_image )
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs , key=lambda k: na_probs[k] )
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list ):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1 )
        cur_r = true_pos / float(num_true_pos )
        if i == len(qid_list ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p )
            recalls.append(cur_r )
    if out_image:
        plot_pr_curve(precisions , recalls , out_image , title )
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir ):
        os.makedirs(out_image_dir )
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v )
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , '''pr_exact.png''' ) , title='''Precision-Recall curve for Exact Match score''' , )
    pr_f1 = make_precision_recall_eval(
        f1_raw , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , '''pr_f1.png''' ) , title='''Precision-Recall curve for F1 score''' , )
    oracle_scores = {k: float(v ) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , '''pr_oracle.png''' ) , title='''Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)''' , )
    merge_eval(main_eval , pr_exact , '''pr_exact''' )
    merge_eval(main_eval , pr_f1 , '''pr_f1''' )
    merge_eval(main_eval , pr_oracle , '''pr_oracle''' )
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x ) / float(len(x ) )
    plt.hist(x , weights=weights , bins=20 , range=(0.0, 1.0) )
    plt.xlabel('''Model probability of no-answer''' )
    plt.ylabel('''Proportion of dataset''' )
    plt.title(f'''Histogram of no-answer probability: {name}''' )
    plt.savefig(os.path.join(image_dir , f'''na_prob_hist_{name}.png''' ) )
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs , key=lambda k: na_probs[k] )
    for i, qid in enumerate(qid_list ):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores ), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds , exact_raw , na_probs , qid_to_has_ans )
    best_f1, f1_thresh = find_best_thresh(preds , f1_raw , na_probs , qid_to_has_ans )
    main_eval['''best_exact'''] = best_exact
    main_eval['''best_exact_thresh'''] = exact_thresh
    main_eval['''best_f1'''] = best_f1
    main_eval['''best_f1_thresh'''] = f1_thresh
def main():
    with open(OPTS.data_file ) as f:
        dataset_json = json.load(f )
        dataset = dataset_json['''data''']
    with open(OPTS.pred_file ) as f:
        preds = json.load(f )
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file ) as f:
            na_probs = json.load(f )
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset )  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset , preds )
    exact_thresh = apply_no_ans_threshold(exact_raw , na_probs , qid_to_has_ans , OPTS.na_prob_thresh )
    f1_thresh = apply_no_ans_threshold(f1_raw , na_probs , qid_to_has_ans , OPTS.na_prob_thresh )
    out_eval = make_eval_dict(exact_thresh , f1_thresh )
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh , f1_thresh , qid_list=has_ans_qids )
        merge_eval(out_eval , has_ans_eval , '''HasAns''' )
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh , f1_thresh , qid_list=no_ans_qids )
        merge_eval(out_eval , no_ans_eval , '''NoAns''' )
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval , preds , exact_raw , f1_raw , na_probs , qid_to_has_ans )
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval , exact_raw , f1_raw , na_probs , qid_to_has_ans , OPTS.out_image_dir )
        histogram_na_prob(na_probs , has_ans_qids , OPTS.out_image_dir , '''hasAns''' )
        histogram_na_prob(na_probs , no_ans_qids , OPTS.out_image_dir , '''noAns''' )
    if OPTS.out_file:
        with open(OPTS.out_file , '''w''' ) as f:
            json.dump(out_eval , f )
    else:
        print(json.dumps(out_eval , indent=2 ) )
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
main()
| 270 | 0 |
def sum_of_digits(n: int) -> int:
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
def sum_of_digits_recursion(n: int) -> int:
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)
def sum_of_digits_compact(n: int) -> int:
    return sum(int(c) for c in str(abs(n)))
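
# Example (illustrative): all three variants agree on the same input, e.g.
# sum_of_digits(262144) == sum_of_digits_compact(262144) == 19,
# since 2 + 6 + 2 + 1 + 4 + 4 = 19.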
def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable, value: int) -> None:
        call = F"""{func.__name__}({value})"""
        timing = timeit(F"""__main__.{call}""" , setup='import __main__' )
        print(F"""{call:56} = {func(value)} -- {timing:.4f} seconds""" )
    for value in (262_144, 1_125_899_906_842_624, 1_267_650_600_228_229_401_496_703_205_376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func , value )
        print()
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 361 | import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = 'std_conv' if 'bit' in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer , num_labels=1_000 , id2label=id2label , label2id=label2id , )
    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace('stem.conv' , 'bit.embedder.convolution' )
    if "blocks" in name:
        name = name.replace('blocks' , 'layers' )
    if "head.fc" in name:
        name = name.replace('head.fc' , 'classifier.1' )
    if name.startswith('norm' ):
        name = 'bit.' + name
    if "bit" not in name and "classifier" not in name:
        name = 'bit.encoder.' + name
    return name
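
# Examples of the mapping (derived from the rules above):
#   "stem.conv.weight"     -> "bit.embedder.convolution.weight"
#   "blocks.0.conv.weight" -> "bit.encoder.layers.0.conv.weight"
#   "head.fc.weight"       -> "classifier.1.weight"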
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name )
    # load original model from timm
    timm_model = create_model(model_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val.squeeze() if 'head' in key else val
    # load HuggingFace model
    model = BitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
    # create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        'bilinear': PILImageResampling.BILINEAR,
        'bicubic': PILImageResampling.BICUBIC,
        'nearest': PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True , size={'shortest_edge': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors='pt' ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
        logits = outputs.logits
    print('Logits:' , logits[0, :3] )
    print('Predicted class:' , model.config.id2label[logits.argmax(-1 ).item()] )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(F"""Pushing model {model_name} and processor to the hub""" )
        model.push_to_hub(F"""ybelkada/{model_name}""" )
        processor.push_to_hub(F"""ybelkada/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 119 | 0 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    '''simple docstring'''
    _optional_components = ["""vqvae"""]
    def __init__( self , vqvae: AutoencoderKL , unet: UNet2DConditionModel , mel: Mel , scheduler: Union[DDIMScheduler, DDPMScheduler] , ) -> None:
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler , mel=mel , vqvae=vqvae )
    def get_default_steps( self ) -> int:
        """simple docstring"""
        # DDIM converges in far fewer steps than DDPM
        return 50 if isinstance(self.scheduler , DDIMScheduler ) else 1_000
@torch.no_grad()
    def __call__( self , batch_size: int = 1 , audio_file: str = None , raw_audio: np.ndarray = None , slice: int = 0 , start_step: int = 0 , steps: int = None , generator: torch.Generator = None , mask_start_secs: float = 0 , mask_end_secs: float = 0 , step_generator: torch.Generator = None , eta: float = 0 , noise: torch.Tensor = None , encoding: torch.Tensor = None , return_dict=True , ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        """simple docstring"""
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps )
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size ) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ) , generator=generator , device=self.device , )
        images = noise
        mask = None
        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file , raw_audio )
            input_image = self.mel.audio_slice_to_image(slice )
            input_image = np.frombuffer(input_image.tobytes() , dtype="""uint8""" ).reshape(
                (input_image.height, input_image.width) )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images , 0 ) ).latent_dist.sample(
                    generator=generator )[0]
                input_images = self.vqvae.config.scaling_factor * input_images
            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images , noise , self.scheduler.timesteps[start_step - 1] )
            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second )
            mask_end = int(mask_end_secs * pixels_per_second )
            mask = self.scheduler.add_noise(input_images , noise , torch.tensor(self.scheduler.timesteps[start_step:] ) )
        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
            if isinstance(self.unet , UNet2DConditionModel ):
                model_output = self.unet(images , t , encoding )["""sample"""]
            else:
                model_output = self.unet(images , t )["""sample"""]
            if isinstance(self.scheduler , DDIMScheduler ):
                images = self.scheduler.step(
                    model_output=model_output , timestep=t , sample=images , eta=eta , generator=step_generator , )["""prev_sample"""]
            else:
                images = self.scheduler.step(
                    model_output=model_output , timestep=t , sample=images , generator=step_generator , )["""prev_sample"""]
            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]
        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images )["""sample"""]
        images = (images / 2 + 0.5).clamp(0 , 1 )
        images = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        images = (images * 255).round().astype("""uint8""" )
        images = list(
            (Image.fromarray(_[:, :, 0] ) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_ , mode="""RGB""" ).convert("""L""" ) for _ in images) )
        audios = [self.mel.image_to_audio(_ ) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)
        return BaseOutput(**AudioPipelineOutput(np.array(audios )[:, np.newaxis, :] ) , **ImagePipelineOutput(images ) )
@torch.no_grad()
    def encode( self , images , steps: int = 50 ) -> np.ndarray:
        """simple docstring"""
        # this inversion is deterministic and therefore only works with DDIM
        assert isinstance(self.scheduler , DDIMScheduler )
        self.scheduler.set_timesteps(steps )
        sample = np.array(
            [np.frombuffer(image.tobytes() , dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample ).to(self.device )
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample , t )["""sample"""]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample
    @staticmethod
    def slerp( x0: torch.Tensor , x1: torch.Tensor , alpha: float ) -> torch.Tensor:
        """simple docstring"""
        theta = acos(torch.dot(torch.flatten(x0 ) , torch.flatten(x1 ) ) / torch.norm(x0 ) / torch.norm(x1 ) )
        return sin((1 - alpha) * theta ) * x0 / sin(theta ) + sin(alpha * theta ) * x1 / sin(theta )
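    # slerp implements spherical linear interpolation:
    #   slerp(x0, x1, alpha) = [sin((1 - alpha) * theta) * x0 + sin(alpha * theta) * x1] / sin(theta)
    # where theta = arccos(<x0, x1> / (|x0| * |x1|)) is the angle between the
    # flattened tensors; at alpha = 0 it returns x0 and at alpha = 1 it returns x1.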
| 132 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    '''simple docstring'''
    def setUp( self ) -> None:
        """simple docstring"""
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps""", """test_metrics.py"""] )
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401
        self.test_metrics = test_metrics
    @require_cpu
    def test_metric_cpu_noop( self ):
        """simple docstring"""
        debug_launcher(self.test_metrics.main , num_processes=1 )
    @require_cpu
    def test_metric_cpu_multi( self ):
        """simple docstring"""
        debug_launcher(self.test_metrics.main )
    @require_single_gpu
    def test_metric_gpu( self ):
        """simple docstring"""
        self.test_metrics.main()
    @require_multi_gpu
    def test_metric_gpu_multi( self ):
        """simple docstring"""
        print(f'''Found {torch.cuda.device_count()} devices.''' )
        cmd = ["""torchrun""", f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
| 132 | 1 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    """simple docstring"""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
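
# Example (illustrative): with the default scale_factor of 8,
# downscale_height_and_width(768, 768) == (96, 96), while a size that is not a
# multiple of 64 is rounded up, e.g. downscale_height_and_width(700, 700) == (88, 88).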
def prepare_image(pil_image, w=512, h=512):
    """simple docstring"""
    pil_image = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
    arr = np.array(pil_image.convert("RGB" ) )
    arr = arr.astype(np.float32 ) / 127.5 - 1
    arr = np.transpose(arr , [2, 0, 1] )
    image = torch.from_numpy(arr ).unsqueeze(0 )
    return image
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """simple docstring"""
    def __init__( self , unet: UNet2DConditionModel , scheduler: DDPMScheduler , movq: VQModel , ):
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def get_timesteps( self , num_inference_steps , strength , device ):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength ) , num_inference_steps )
        t_start = max(num_inference_steps - init_timestep , 0 )
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents( self , image , timestep , batch_size , num_images_per_prompt , dtype , device , generator=None ):
        if not isinstance(image , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image )}""" )
        image = image.to(device=device , dtype=dtype )
        batch_size = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator , list ) and len(generator ) != batch_size:
                raise ValueError(
                    f"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
                    f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
            elif isinstance(generator , list ):
                init_latents = [
                    self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(batch_size )
                ]
                init_latents = torch.cat(init_latents , dim=0 )
            else:
                init_latents = self.movq.encode(image ).latent_dist.sample(generator )
            init_latents = self.movq.config.scaling_factor * init_latents
        init_latents = torch.cat([init_latents] , dim=0 )
        shape = init_latents.shape
        noise = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        # get latents
        init_latents = self.scheduler.add_noise(init_latents , noise , timestep )
        latents = init_latents
        return latents
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Tuple=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase__ : Dict = torch.device(f"""cuda:{gpu_id}""" )
lowercase__ : str = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[int]=0 ):
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
lowercase__ : Optional[Any] = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=SCREAMING_SNAKE_CASE )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase__ : str = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase__ , lowercase__ : List[Any] = cpu_offload_with_hook(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , prev_module_hook=SCREAMING_SNAKE_CASE )
# We'll offload the last model manually.
lowercase__ : Union[str, Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def snake_case ( self : Dict ):
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(SCREAMING_SNAKE_CASE , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(SCREAMING_SNAKE_CASE )
def __call__( self : str , SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, List[torch.FloatTensor]] , SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, List[torch.FloatTensor]] , SCREAMING_SNAKE_CASE : int = 512 , SCREAMING_SNAKE_CASE : int = 512 , SCREAMING_SNAKE_CASE : int = 100 , SCREAMING_SNAKE_CASE : float = 4.0 , SCREAMING_SNAKE_CASE : float = 0.3 , SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : Optional[Union[torch.Generator, List[torch.Generator]]] = None , SCREAMING_SNAKE_CASE : Optional[str] = "pil" , SCREAMING_SNAKE_CASE : bool = True , ):
lowercase__ : Tuple = self._execution_device
lowercase__ : Tuple = guidance_scale > 1.0
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : Tuple = torch.cat(SCREAMING_SNAKE_CASE , dim=0 )
lowercase__ : Optional[Any] = image_embeds.shape[0]
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : List[str] = torch.cat(SCREAMING_SNAKE_CASE , dim=0 )
if do_classifier_free_guidance:
lowercase__ : Optional[int] = image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE , dim=0 )
lowercase__ : Dict = negative_image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE , dim=0 )
lowercase__ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=SCREAMING_SNAKE_CASE )
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : str = [image]
if not all(isinstance(SCREAMING_SNAKE_CASE , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"""Input is in incorrect format: {[type(SCREAMING_SNAKE_CASE ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
lowercase__ : str = torch.cat([prepare_image(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for i in image] , dim=0 )
lowercase__ : int = image.to(dtype=image_embeds.dtype , device=SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = self.movq.encode(SCREAMING_SNAKE_CASE )["latents"]
lowercase__ : Dict = latents.repeat_interleave(SCREAMING_SNAKE_CASE , dim=0 )
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE , device=SCREAMING_SNAKE_CASE )
lowercase__ , lowercase__ : Any = self.get_timesteps(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
lowercase__ , lowercase__ : List[str] = downscale_height_and_width(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.movq_scale_factor )
lowercase__ : Tuple = self.prepare_latents(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , image_embeds.dtype , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ : Tuple = {"image_embeds": image_embeds}
lowercase__ : Any = self.unet(
sample=SCREAMING_SNAKE_CASE , timestep=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , added_cond_kwargs=SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE , )[0]
if do_classifier_free_guidance:
lowercase__ , lowercase__ : str = noise_pred.split(latents.shape[1] , dim=1 )
lowercase__ , lowercase__ : Any = noise_pred.chunk(2 )
lowercase__ , lowercase__ : List[str] = variance_pred.chunk(2 )
lowercase__ : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase__ : List[Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase__ , lowercase__ : int = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase__ : Tuple = self.scheduler.step(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , )[0]
# post-processing
lowercase__ : Optional[Any] = self.movq.decode(SCREAMING_SNAKE_CASE , force_not_quantize=SCREAMING_SNAKE_CASE )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowercase__ : Optional[int] = image * 0.5 + 0.5
lowercase__ : List[str] = image.clamp(0 , 1 )
lowercase__ : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase__ : int = self.numpy_to_pil(SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE )
| 121 |
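The strength-to-timesteps mapping that the pipeline's `get_timesteps` step performs is easy to check in isolation. A minimal sketch, assuming a plain list stands in for the scheduler's timestep tensor (the function and variable names here are illustrative, not the diffusers API):

def get_img2img_timesteps(timesteps, num_inference_steps, strength):
    # run only the last `strength` fraction of the schedule, clipped to its length
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    return timesteps[t_start:], num_inference_steps - t_start

# toy schedule of 10 timesteps from t=1000 down to t=100
schedule = list(range(1000, 0, -100))
steps, n_steps = get_img2img_timesteps(schedule, num_inference_steps=10, strength=0.2)
print(steps, n_steps)  # [200, 100] 2 -> low strength keeps the result close to init_image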
from typing import Union
import fire
import torch
from tqdm import tqdm
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ = "cpu" , lowerCamelCase__ = None ):
"""simple docstring"""
lowercase__ : Any = torch.load(lowerCamelCase__ , map_location=lowerCamelCase__ )
for k, v in tqdm(state_dict.items() ):
if not isinstance(lowerCamelCase__ , torch.Tensor ):
raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" )
lowercase__ : int = v.half()
if save_path is None: # overwrite src_path
lowercase__ : Optional[Any] = src_path
torch.save(lowerCamelCase__ , lowerCamelCase__ )
if __name__ == "__main__":
fire.Fire(convert)
| 121 | 1 |
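The conversion above boils down to calling `.half()` on every tensor in the state dict. A quick in-memory check of the same idea, with made-up tensor names:

import torch

state_dict = {"weight": torch.randn(4, 4), "bias": torch.zeros(4)}
halved = {k: v.half() for k, v in state_dict.items()}  # same op convert() applies in place

assert all(v.dtype == torch.float16 for v in halved.values())
# fp16 tensors take half the bytes of fp32 for the same shapes
print(sum(v.numel() * v.element_size() for v in halved.values()))  # 40 (vs 80 in fp32)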
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
__snake_case = logging.getLogger(__name__)
@dataclass
class lowercase :
"""simple docstring"""
_a = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
_a = field(
default=A__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
_a = field(
default=A__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
_a = field(
default=A__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    _a = field(default=A__ , metadata={'help': 'Whether to freeze the encoder.'} )
_a = field(default=A__ , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class lowercase :
"""simple docstring"""
_a = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
_a = field(
default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
_a = field(
default=10_24 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
_a = field(
default=1_28 , metadata={
'help': (
'The maximum total sequence length for target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
_a = field(
default=1_42 , metadata={
'help': (
'The maximum total sequence length for validation target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded. '
'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
'during ``evaluate`` and ``predict``.'
)
} , )
_a = field(
default=1_42 , metadata={
'help': (
'The maximum total sequence length for test target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
_a = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
_a = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
_a = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
_a = field(default=A__ , metadata={'help': 'Source language id for translation.'} )
_a = field(default=A__ , metadata={'help': 'Target language id for translation.'} )
_a = field(default=A__ , metadata={'help': '# num_beams to use for evaluation.'} )
_a = field(
default=A__ , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def handle_metrics( split , metrics , output_dir ) -> None:
    '''simple docstring'''
    logger.info(f'''***** {split} metrics *****''' )
    for key in sorted(metrics.keys() ):
        logger.info(f'''  {key} = {metrics[key]}''' )
    save_json(metrics , os.path.join(output_dir , f'''{split}_results.json''' ) )
def main() -> Optional[int]:
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
check_output_dir(__a )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fp16 , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , __a )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase__ :int = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCamelCase__ :List[str] = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(__a , __a , __a ):
assert hasattr(__a , __a ), f'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
setattr(__a , __a , getattr(__a , __a ) )
UpperCamelCase__ :Optional[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    UpperCamelCase__ :int = AutoModelForSeq2SeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=__a , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(__a , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
UpperCamelCase__ :Any = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(__a , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(__a , __a ):
UpperCamelCase__ :Union[str, Any] = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
UpperCamelCase__ :List[str] = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(__a )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
    UpperCamelCase__ :str = Seq2SeqDataset
# Get datasets
UpperCamelCase__ :List[Any] = (
dataset_class(
__a , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_train
else None
)
UpperCamelCase__ :List[str] = (
dataset_class(
__a , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
UpperCamelCase__ :Optional[int] = (
dataset_class(
__a , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
UpperCamelCase__ :List[str] = (
build_compute_metrics_fn(data_args.task , __a ) if training_args.predict_with_generate else None
)
    UpperCamelCase__ :Optional[int] = Seq2SeqTrainer(
        model=__a , args=__a , data_args=__a , train_dataset=__a , eval_dataset=__a , data_collator=Seq2SeqDataCollator(
        __a , __a , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__a , tokenizer=__a , )
UpperCamelCase__ :Any = {}
# Training
if training_args.do_train:
logger.info('''*** Train ***''' )
UpperCamelCase__ :List[str] = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
UpperCamelCase__ :Any = train_result.metrics
UpperCamelCase__ :str = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('''train''' , __a , training_args.output_dir )
all_metrics.update(__a )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
UpperCamelCase__ :Optional[int] = trainer.evaluate(metric_key_prefix='''val''' )
UpperCamelCase__ :Union[str, Any] = data_args.n_val
UpperCamelCase__ :List[Any] = round(metrics['''val_loss'''] , 4 )
if trainer.is_world_process_zero():
handle_metrics('''val''' , __a , training_args.output_dir )
all_metrics.update(__a )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
UpperCamelCase__ :Any = trainer.predict(test_dataset=__a , metric_key_prefix='''test''' )
UpperCamelCase__ :Optional[int] = test_output.metrics
UpperCamelCase__ :Union[str, Any] = data_args.n_test
if trainer.is_world_process_zero():
UpperCamelCase__ :str = round(metrics['''test_loss'''] , 4 )
handle_metrics('''test''' , __a , training_args.output_dir )
all_metrics.update(__a )
if training_args.predict_with_generate:
UpperCamelCase__ :Dict = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=__a , clean_up_tokenization_spaces=__a )
UpperCamelCase__ :List[str] = lmap(str.strip , __a )
write_txt_file(__a , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
if trainer.is_world_process_zero():
save_json(__a , os.path.join(training_args.output_dir , '''all_results.json''' ) )
return all_metrics
def _mp_fn( index ) -> Any:
    '''simple docstring'''
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main() | 97 |
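The script's argument handling rests on `HfArgumentParser`, which turns dataclass fields into CLI flags and `metadata["help"]` into their help strings. A minimal sketch of that pattern (the dataclass and flag values below are invented for illustration):

from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class DemoArguments:
    model_name_or_path: str = field(metadata={"help": "Model id or local path"})
    max_source_length: int = field(default=1024, metadata={"help": "Max input length"})

parser = HfArgumentParser(DemoArguments)
# each field becomes a --flag; parse_args_into_dataclasses returns one instance per dataclass
(demo_args,) = parser.parse_args_into_dataclasses(
    args=["--model_name_or_path", "t5-small", "--max_source_length", "512"]
)
print(demo_args.model_name_or_path, demo_args.max_source_length)  # t5-small 512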
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_=7 , UpperCamelCase_=3 , UpperCamelCase_=30 , UpperCamelCase_=400 , UpperCamelCase_=True , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=True , UpperCamelCase_=1 / 255 , UpperCamelCase_=True , ):
'''simple docstring'''
UpperCamelCase__ :Dict = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
UpperCamelCase__ :str = parent
UpperCamelCase__ :List[Any] = batch_size
UpperCamelCase__ :Dict = num_channels
UpperCamelCase__ :str = min_resolution
UpperCamelCase__ :Optional[Any] = max_resolution
UpperCamelCase__ :int = do_resize
UpperCamelCase__ :Optional[Any] = size
UpperCamelCase__ :Tuple = do_normalize
UpperCamelCase__ :List[Any] = image_mean
UpperCamelCase__ :Dict = image_std
UpperCamelCase__ :Union[str, Any] = do_rescale
UpperCamelCase__ :Union[str, Any] = rescale_factor
UpperCamelCase__ :Union[str, Any] = do_pad
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=False ):
'''simple docstring'''
if not batched:
UpperCamelCase__ :List[str] = image_inputs[0]
if isinstance(UpperCamelCase_ , Image.Image ):
UpperCamelCase__ , UpperCamelCase__ :List[str] = image.size
else:
UpperCamelCase__ , UpperCamelCase__ :List[Any] = image.shape[1], image.shape[2]
if w < h:
UpperCamelCase__ :int = int(self.size['''shortest_edge'''] * h / w )
UpperCamelCase__ :Dict = self.size['''shortest_edge''']
elif w > h:
UpperCamelCase__ :int = self.size['''shortest_edge''']
UpperCamelCase__ :Tuple = int(self.size['''shortest_edge'''] * w / h )
else:
UpperCamelCase__ :str = self.size['''shortest_edge''']
UpperCamelCase__ :str = self.size['''shortest_edge''']
else:
UpperCamelCase__ :Any = []
for image in image_inputs:
UpperCamelCase__ , UpperCamelCase__ :Dict = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCamelCase__ :List[Any] = max(UpperCamelCase_ , key=lambda UpperCamelCase_ : item[0] )[0]
UpperCamelCase__ :Optional[int] = max(UpperCamelCase_ , key=lambda UpperCamelCase_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowercase ( A__ , unittest.TestCase ):
"""simple docstring"""
_a = ConditionalDetrImageProcessor if is_vision_available() else None
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = ConditionalDetrImageProcessingTester(self )
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , '''image_mean''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''image_std''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''size''' ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , UpperCamelCase_ )
UpperCamelCase__ :List[str] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase_ )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ :List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
UpperCamelCase__ :Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
UpperCamelCase__ :List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray )
# Test not batched input
UpperCamelCase__ :Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :List[Any] = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ :Union[str, Any] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
# Test not batched input
UpperCamelCase__ :str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :Dict = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ :List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
UpperCamelCase__ :Optional[int] = json.loads(f.read() )
UpperCamelCase__ :Any = {'''image_id''': 39769, '''annotations''': target}
# encode them
UpperCamelCase__ :str = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' )
UpperCamelCase__ :List[Any] = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , return_tensors='''pt''' )
# verify pixel values
UpperCamelCase__ :List[str] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ )
UpperCamelCase__ :str = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) )
# verify area
UpperCamelCase__ :str = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) )
# verify boxes
UpperCamelCase__ :Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) )
# verify image_id
UpperCamelCase__ :List[Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) )
# verify is_crowd
UpperCamelCase__ :int = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) )
# verify class_labels
UpperCamelCase__ :List[str] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) )
# verify orig_size
UpperCamelCase__ :Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) )
# verify size
UpperCamelCase__ :Union[str, Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
UpperCamelCase__ :Tuple = json.loads(f.read() )
UpperCamelCase__ :List[str] = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target}
UpperCamelCase__ :Any = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
UpperCamelCase__ :List[Any] = ConditionalDetrImageProcessor(format='''coco_panoptic''' )
UpperCamelCase__ :Dict = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , masks_path=UpperCamelCase_ , return_tensors='''pt''' )
# verify pixel values
UpperCamelCase__ :str = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) )
# verify area
UpperCamelCase__ :Tuple = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) )
# verify boxes
UpperCamelCase__ :Any = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ )
UpperCamelCase__ :List[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) )
# verify image_id
UpperCamelCase__ :List[str] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) )
# verify is_crowd
UpperCamelCase__ :Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) )
# verify class_labels
UpperCamelCase__ :str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) )
# verify masks
UpperCamelCase__ :Optional[Any] = 822873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCamelCase_ )
# verify orig_size
UpperCamelCase__ :List[str] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) )
# verify size
UpperCamelCase__ :List[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) ) | 97 | 1 |
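The expected sizes asserted throughout these tests follow DETR-style aspect-preserving resizing: the shorter side is scaled to `shortest_edge` and the other side follows. A standalone version of that rule (ignoring the `longest_edge` cap, which the test resolutions never reach):

def expected_resize(w, h, shortest_edge=18):
    # scale the short side to shortest_edge, keep the aspect ratio; returns (height, width)
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge

print(expected_resize(400, 300))  # (18, 24): height becomes 18, width scales to 24
print(expected_resize(300, 400))  # (24, 18)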
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCAmelCase ( unittest.TestCase ):
@slow
def A_ ( self : Any ) -> int:
lowerCamelCase__ : int = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
lowerCamelCase__ : Optional[int] = AutoTokenizer.from_pretrained('xlm-roberta-base' )
lowerCamelCase__ : Any = 'The dog is cute and lives in the garden house'
lowerCamelCase__ : Union[str, Any] = jnp.array([tokenizer.encode(UpperCAmelCase )] )
lowerCamelCase__ : Optional[int] = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
        lowerCamelCase__ : str = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
lowerCamelCase__ : Union[str, Any] = model(UpperCAmelCase )['last_hidden_state']
self.assertEqual(output.shape , UpperCAmelCase )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , UpperCAmelCase , atol=1e-3 ) )
| 45 |
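The assertion at the end of that test is the standard tolerance-based comparison for floating-point model outputs. The same pattern on synthetic arrays, assuming JAX is installed (no checkpoint download needed):

import jax.numpy as jnp

expected = jnp.array([0.0101, 0.1218, -0.0803])
actual = expected + 5e-4  # pretend this slice came from a model forward pass

# atol=1e-3 absorbs small numerical drift across devices and backends
assert jnp.allclose(actual, expected, atol=1e-3)
assert not jnp.allclose(actual, expected, atol=1e-4)  # tighter tolerance would fail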
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase : List[Any] = {
"""configuration_trajectory_transformer""": [
"""TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TrajectoryTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] = [
"""TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrajectoryTransformerModel""",
"""TrajectoryTransformerPreTrainedModel""",
"""load_tf_weights_in_trajectory_transformer""",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
_UpperCAmelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 45 | 1 |
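The file above is the lazy-import shim used across the library: exported names live in `_import_structure`, real imports happen only under `TYPE_CHECKING`, and at runtime the module resolves attributes on first access. A toy version of the mechanism (class and module names invented):

import importlib
from types import ModuleType

class TinyLazyModule(ModuleType):
    """Resolve exported names to submodule imports on first attribute access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(self._name_to_module[attr], self.__name__)
        return getattr(submodule, attr)

# usage sketch: sys.modules[__name__] = TinyLazyModule(__name__, {".configuration_x": ["XConfig"]})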
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ : int = {
'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Optional[int] = [
'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Swinv2ForImageClassification',
'Swinv2ForMaskedImageModeling',
'Swinv2Model',
'Swinv2PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
lowercase__ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 324 |
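Both init files guard their torch-backed exports with the same try/except pattern: probe for the backend, raise a sentinel if it is missing, and export a stub list instead. A stripped-down version of that guard, with a placeholder export name:

def is_torch_available():
    try:
        import torch  # noqa: F401
        return True
    except ImportError:
        return False

class OptionalDependencyNotAvailable(BaseException):
    """Sentinel raised internally when a required backend is missing."""

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    torch_exports = []  # torch missing: expose nothing
else:
    torch_exports = ["SomeTorchModel"]  # torch present: expose the real classes

print(torch_exports)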
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
lowercase__ : List[str] = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
lowercase__ : Dict = logging.getLogger()
def get_setup_file() -> Optional[int]:
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument('''-f''' )
    args = parser.parse_args()
    return args.f
def a__ ( lowercase : Tuple, lowercase : Dict="eval" ) -> int:
"""simple docstring"""
_UpperCamelCase = os.path.join(lowercase, F"""{split}_results.json""" )
if os.path.exists(lowercase ):
with open(lowercase, '''r''' ) as f:
return json.load(lowercase )
raise ValueError(F"""can't find {path}""" )
lowercase__ : int = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
def snake_case__ ( self : Any ) -> str:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ):
run_flax_glue.main()
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def snake_case__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ):
run_clm_flax.main()
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
def snake_case__ ( self : Tuple ) -> str:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ):
run_summarization_flax.main()
_UpperCamelCase = get_results(lowerCAmelCase__ , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def snake_case__ ( self : Tuple ) -> Any:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ):
run_mlm_flax.main()
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
def snake_case__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ):
            run_t5_mlm_flax.main()
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def snake_case__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = 7 if get_gpu_count() > 1 else 2
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ):
run_flax_ner.main()
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def snake_case__ ( self : str ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ):
run_qa.main()
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
| 324 | 1 |
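Every test above launches a training script in-process by swapping `sys.argv` before calling the script's `main()`. The trick in isolation, against a stub entry point (the script and flag names are invented):

import sys
from unittest.mock import patch

def stub_main():
    # stands in for e.g. run_flax_glue.main(): reads argv like any CLI entry point
    return sys.argv[1:]

testargs = "run_demo.py --learning_rate 1e-4 --seed 42".split()
with patch.object(sys, "argv", testargs):
    seen = stub_main()

print(seen)  # ['--learning_rate', '1e-4', '--seed', '42']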
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class UpperCAmelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple=1_3 , lowerCAmelCase_ : Any=3_2 , lowerCAmelCase_ : Tuple=2 , lowerCAmelCase_ : List[Any]=3 , lowerCAmelCase_ : Any=1_6 , lowerCAmelCase_ : Dict=[1, 2, 1] , lowerCAmelCase_ : str=[2, 2, 4] , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : Union[str, Any]=2.0 , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : int=0.0 , lowerCAmelCase_ : List[Any]=0.0 , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : Any="gelu" , lowerCAmelCase_ : Optional[int]=False , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Tuple=0.02 , lowerCAmelCase_ : Tuple=1e-5 , lowerCAmelCase_ : int=True , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Tuple=1_0 , lowerCAmelCase_ : str=8 , lowerCAmelCase_ : List[str]=["stage1", "stage2", "stage3"] , lowerCAmelCase_ : List[str]=[1, 2, 3] , ):
"""simple docstring"""
_A: Optional[int] = parent
_A: int = batch_size
_A: Optional[Any] = image_size
_A: Any = patch_size
_A: List[str] = num_channels
_A: int = embed_dim
_A: Optional[int] = depths
_A: Union[str, Any] = num_heads
_A: int = window_size
_A: Optional[int] = mlp_ratio
_A: Union[str, Any] = qkv_bias
_A: Optional[int] = hidden_dropout_prob
_A: Optional[int] = attention_probs_dropout_prob
_A: Dict = drop_path_rate
_A: List[str] = hidden_act
_A: Tuple = use_absolute_embeddings
_A: str = patch_norm
_A: int = layer_norm_eps
_A: Union[str, Any] = initializer_range
_A: Tuple = is_training
_A: Union[str, Any] = scope
_A: Union[str, Any] = use_labels
_A: Union[str, Any] = type_sequence_label_size
_A: List[str] = encoder_stride
_A: List[Any] = out_features
_A: int = out_indices
def __magic_name__ ( self : Any ):
"""simple docstring"""
_A: Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A: Dict = None
if self.use_labels:
_A: int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A: Optional[int] = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def __magic_name__ ( self : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple ):
"""simple docstring"""
_A: Dict = MaskFormerSwinModel(config=_A )
model.to(_A )
model.eval()
_A: Any = model(_A )
_A: Tuple = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_A: Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __magic_name__ ( self : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Any ):
"""simple docstring"""
_A: Union[str, Any] = MaskFormerSwinBackbone(config=_A )
model.to(_A )
model.eval()
_A: str = model(_A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [1_3, 1_6, 1_6, 1_6] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4] )
# verify ValueError
with self.parent.assertRaises(_A ):
_A: int = ['stem']
_A: List[str] = MaskFormerSwinBackbone(config=_A )
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
_A: Dict = self.prepare_config_and_inputs()
_A: Union[str, Any] = config_and_inputs
_A: str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : str = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
__UpperCamelCase : List[str] = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
__UpperCamelCase : Optional[Any] = False
__UpperCamelCase : List[str] = False
__UpperCamelCase : Tuple = False
__UpperCamelCase : int = False
__UpperCamelCase : Optional[int] = False
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
_A: List[str] = MaskFormerSwinModelTester(self )
_A: str = ConfigTester(self , config_class=_A , embed_dim=3_7 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'''`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'''
''' `nn.DataParallel`'''
) )
def __magic_name__ ( self : str ):
"""simple docstring"""
pass
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __magic_name__ ( self : Dict ):
"""simple docstring"""
return
def __magic_name__ ( self : Any ):
"""simple docstring"""
_A: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
_A: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_A )
@unittest.skip('''Swin does not use inputs_embeds''' )
def __magic_name__ ( self : str ):
"""simple docstring"""
pass
@unittest.skip('''Swin does not support feedforward chunking''' )
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
pass
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
_A: str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A: str = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_A: int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , nn.Linear ) )
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
_A: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A: str = model_class(_A )
_A: Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A: int = [*signature.parameters.keys()]
_A: int = ['pixel_values']
self.assertListEqual(arg_names[:1] , _A )
@unittest.skip(reason='''MaskFormerSwin is only used as backbone and doesn\'t support output_attentions''' )
def __magic_name__ ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormerSwin is only used as an internal backbone''' )
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
pass
def __magic_name__ ( self : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] ):
"""simple docstring"""
_A: List[str] = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
_A: Any = model(**self._prepare_for_class(_A , _A ) )
_A: str = outputs.hidden_states
_A: List[str] = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_A ) , _A )
# Swin has a different seq_length
_A: Union[str, Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_A: List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
_A: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_A: Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_A: Tuple = True
self.check_hidden_states_output(_A , _A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A: List[Any] = True
self.check_hidden_states_output(_A , _A , _A , _A )
def __magic_name__ ( self : str ):
"""simple docstring"""
_A: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_A: List[Any] = 3
_A: Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_A: Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_A: Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_A: Tuple = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_A: Dict = True
self.check_hidden_states_output(_A , _A , _A , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A: Tuple = True
self.check_hidden_states_output(_A , _A , _A , (padded_height, padded_width) )
@unittest.skip(reason='''MaskFormerSwin doesn\'t have pretrained checkpoints''' )
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
pass
@unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
pass
@unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
pass
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
_A: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(lowerCAmelCase_ : Tuple ):
_A: List[Any] = 0
return t
def check_equivalence(lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str]={} ):
with torch.no_grad():
_A: str = model(**_A , return_dict=_A , **_A )
_A: Tuple = model(**_A , return_dict=_A , **_A ).to_tuple()
def recursive_check(lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str ):
if isinstance(_A , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_A , _A ):
recursive_check(_A , _A )
elif isinstance(_A , _A ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(_A , _A )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_A ) , set_nan_tensor_to_zero(_A ) , atol=1e-5 ) , msg=(
'''Tuple and dict output are not equal. Difference:'''
F""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
F""" {torch.isnan(_A ).any()} and `inf`: {torch.isinf(_A )}. Dict has"""
F""" `nan`: {torch.isnan(_A ).any()} and `inf`: {torch.isinf(_A )}."""
) , )
recursive_check(_A , _A )
for model_class in self.all_model_classes:
_A: Any = model_class(_A )
model.to(_A )
model.eval()
_A: Dict = self._prepare_for_class(_A , _A )
_A: List[str] = self._prepare_for_class(_A , _A )
check_equivalence(_A , _A , _A )
_A: Union[str, Any] = self._prepare_for_class(_A , _A , return_labels=_A )
_A: Any = self._prepare_for_class(_A , _A , return_labels=_A )
check_equivalence(_A , _A , _A )
_A: str = self._prepare_for_class(_A , _A )
_A: Union[str, Any] = self._prepare_for_class(_A , _A )
check_equivalence(_A , _A , _A , {'''output_hidden_states''': True} )
_A: int = self._prepare_for_class(_A , _A , return_labels=_A )
_A: Optional[Any] = self._prepare_for_class(_A , _A , return_labels=_A )
check_equivalence(_A , _A , _A , {'''output_hidden_states''': True} )
@require_torch
class UpperCAmelCase ( unittest.TestCase , snake_case_ ):
'''simple docstring'''
__UpperCamelCase : List[str] = (MaskFormerSwinBackbone,) if is_torch_available() else ()
__UpperCamelCase : int = MaskFormerSwinConfig
def __magic_name__ ( self : int ):
"""simple docstring"""
_A: Optional[Any] = MaskFormerSwinModelTester(self )
def __magic_name__ ( self : Any ):
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict['pixel_values'].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config )
            backbone.to(torch_device )
            backbone.eval()
            outputs = backbone(**inputs_dict )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , _A )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
            outputs = backbone(**inputs_dict , output_hidden_states=True )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size , _ , h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
            outputs = backbone(**inputs_dict , output_attentions=True )
self.assertIsNotNone(outputs.attentions )
| 350 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class AutomaticSpeechRecognition( TaskTemplate ):
    '''simple docstring'''
    task: str = field(default='''automatic-speech-recognition''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    input_schema: ClassVar[Features] = Features({'''audio''': Audio()} )
    label_schema: ClassVar[Features] = Features({'''transcription''': Value('''string''' )} )
    audio_column: str = "audio"
    transcription_column: str = "transcription"
    def align_with_features( self , features ):
"""simple docstring"""
if self.audio_column not in features:
raise ValueError(F"""Column {self.audio_column} is not present in features.""" )
        if not isinstance(features[self.audio_column] , Audio ):
            raise ValueError(F"""Column {self.audio_column} is not an Audio type.""" )
        task_template = copy.deepcopy(self )
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template
    @property
    def column_mapping( self ):
"""simple docstring"""
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 301 | 0 |
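# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original sources). The align_with_features
# method above relies on the task template being a *frozen* dataclass: plain
# attribute assignment would raise FrozenInstanceError, so the aligned schema
# is swapped in through the deep copy's __dict__. A minimal, self-contained
# demonstration of that trick; the schema values here are placeholder strings
# rather than real datasets.Audio features.
import copy
from dataclasses import dataclass, field

@dataclass(frozen=True)
class _SketchTaskTemplate:
    task: str = "automatic-speech-recognition"
    input_schema: dict = field(default_factory=dict)

_template = _SketchTaskTemplate(input_schema={"audio": "Audio()"})
_aligned = copy.deepcopy(_template)
_schema = dict(_template.input_schema)
_schema["audio"] = "Audio(sampling_rate=16000)"  # hypothetical aligned feature
_aligned.__dict__["input_schema"] = _schema  # bypasses the frozen-instance check
print(_aligned.input_schema)   # {'audio': 'Audio(sampling_rate=16000)'}
print(_template.input_schema)  # the original template is untouched
# ---------------------------------------------------------------------------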
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 2_00
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def evaluate(item ,main_target ):
    """Score an item: one point per character matching the target position."""
    score = len([g for position, g in enumerate(item ) if g == main_target[position]] )
    return (item, float(score ))
def crossover(parent_1 ,parent_2 ):
    """Slice both parents at a random point and swap the tails."""
    random_slice = random.randint(0 ,len(parent_1 ) - 1 )
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child ,genes ):
    """With probability MUTATION_PROBABILITY, replace one random gene."""
    child_list = list(child )
    if random.uniform(0 ,1 ) < MUTATION_PROBABILITY:
        child_list[random.randint(0 ,len(child ) - 1 )] = random.choice(genes )
    return "".join(child_list )
def select(parent_1 ,population_score ,genes ,):
    """Breed and mutate children for one selected parent."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 1_00 ) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n ):
        parent_2 = population_score[random.randint(0 ,N_SELECTED )][0]
        child_1 , child_2 = crossover(parent_1[0] ,parent_2 )
        # Append new string to the population list.
        pop.append(mutate(child_1 ,genes ) )
        pop.append(mutate(child_2 ,genes ) )
    return pop
def basic(target ,genes ,debug = True ):
    """Run the evolution loop until the target string is matched exactly."""
    if N_POPULATION < N_SELECTED:
        msg = F'''{N_POPULATION} must be bigger than {N_SELECTED}'''
        raise ValueError(msg )
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes} )
    if not_in_genes_list:
        msg = F'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
        raise ValueError(msg )
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION ):
        population.append("".join([random.choice(genes ) for i in range(len(target ) )] ) )
    # Just some logs to know what the algorithms is doing.
    generation , total_population = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
        total_population += len(population )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item ,target ) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score ,key=lambda x : x[1] ,reverse=True )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'''\nGeneration: {generation}'''
F'''\nTotal Population:{total_population}'''
F'''\nBest score: {population_score[0][1]}'''
F'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3 )]
        population.clear()
        population.extend(population_best )
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target )) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED ):
            population.extend(select(population_score[int(i )] ,population_score ,genes ) )
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population ) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation , population , target = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 127 |
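# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original sources). A tiny standalone run
# of the crossover and mutation operators defined in the genetic algorithm
# above, so their mechanics are visible without the full evolution loop. The
# gene set and seed below are arbitrary.
import random

random.seed(0)
_genes = list("abcdefgh olrwd")
_a, _b = crossover("hello", "world")
print(_a, _b)               # two children that mix a prefix of one parent with
                            # the suffix of the other, e.g. "horld" / "wello"
print(mutate(_a, _genes))   # with probability MUTATION_PROBABILITY, one
                            # character is resampled from the gene pool
# ---------------------------------------------------------------------------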
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"
# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 5_12, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input ,drop_prob = 0.0 ,training = False ):
    """Drop whole residual paths per sample (stochastic depth)."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape ,dtype=input.dtype ,device=input.device )
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob ) * random_tensor
    return output
class PoolFormerDropPath( nn.Module ):
    """simple docstring"""
    def __init__( self , drop_prob = None ):
        super().__init__()
        self.drop_prob = drop_prob
    def forward( self , hidden_states ):
        return drop_path(hidden_states , self.drop_prob , self.training )
    def extra_repr( self ):
        return "p={}".format(self.drop_prob )
class PoolFormerEmbeddings( nn.Module ):
    """simple docstring"""
    def __init__( self , hidden_size , num_channels , patch_size , stride , padding , norm_layer=None ):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size , collections.abc.Iterable ) else (patch_size, patch_size)
        stride = stride if isinstance(stride , collections.abc.Iterable ) else (stride, stride)
        padding = padding if isinstance(padding , collections.abc.Iterable ) else (padding, padding)
        self.projection = nn.Conv2d(num_channels , hidden_size , kernel_size=patch_size , stride=stride , padding=padding )
        self.norm = norm_layer(hidden_size ) if norm_layer else nn.Identity()
    def forward( self , pixel_values ):
        embeddings = self.projection(pixel_values )
        embeddings = self.norm(embeddings )
        return embeddings
class PoolFormerGroupNorm( nn.GroupNorm ):
    """simple docstring"""
    def __init__( self , num_channels , **kwargs ):
        super().__init__(1 , num_channels , **kwargs )
class PoolFormerPooling( nn.Module ):
    """simple docstring"""
    def __init__( self , pool_size ):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size , stride=1 , padding=pool_size // 2 , count_include_pad=False )
    def forward( self , hidden_states ):
        return self.pool(hidden_states ) - hidden_states
class PoolFormerOutput( nn.Module ):
    """simple docstring"""
    def __init__( self , config , dropout_prob , hidden_size , intermediate_size ):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size , intermediate_size , 1 )
        self.conv2 = nn.Conv2d(intermediate_size , hidden_size , 1 )
        self.drop = PoolFormerDropPath(dropout_prob )
        if isinstance(config.hidden_act , str ):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act
    def forward( self , hidden_states ):
        hidden_states = self.conv1(hidden_states )
        hidden_states = self.act_fn(hidden_states )
        hidden_states = self.drop(hidden_states )
        hidden_states = self.conv2(hidden_states )
        hidden_states = self.drop(hidden_states )
        return hidden_states
class PoolFormerLayer( nn.Module ):
    """simple docstring"""
    def __init__( self , config , num_channels , pool_size , hidden_size , intermediate_size , drop_path ):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size )
        self.output = PoolFormerOutput(config , drop_path , hidden_size , intermediate_size )
        self.before_norm = PoolFormerGroupNorm(num_channels )
        self.after_norm = PoolFormerGroupNorm(num_channels )
        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path ) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels) ) , requires_grad=True )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels) ) , requires_grad=True )
    def forward( self , hidden_states ):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states ) )
            scaled_op = self.layer_scale_1.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op )
            outputs = ()
            layer_output = self.output(self.after_norm(hidden_states ) )
            scaled_op = self.layer_scale_2.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op )
            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states ) ) )
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()
            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states ) ) )
            output = hidden_states + layer_output
            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder( nn.Module ):
    """simple docstring"""
    def __init__( self , config ):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks ):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
        self.patch_embeddings = nn.ModuleList(embeddings )
        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks ):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i] ):
                layers.append(
                    PoolFormerLayer(
                        config , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
            blocks.append(nn.ModuleList(layers ) )
        self.block = nn.ModuleList(blocks )
    def forward( self , pixel_values , output_hidden_states=False , return_dict=True ):
        all_hidden_states = () if output_hidden_states else None
        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
            embedding_layer , block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states )
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer ):
                layer_outputs = blk(hidden_states )
                hidden_states = layer_outputs[0]
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states , hidden_states=all_hidden_states )
class PoolFormerPreTrainedModel( PreTrainedModel ):
    """simple docstring"""
    config_class = PoolFormerConfig
    base_model_prefix = 'poolformer'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True
    def _init_weights( self , module ):
        if isinstance(module , (nn.Linear, nn.Conv2d) ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module , nn.LayerNorm ):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0 )
    def _set_gradient_checkpointing( self , module , value=False ):
        if isinstance(module , PoolFormerEncoder ):
            module.gradient_checkpointing = value
_SCREAMING_SNAKE_CASE : Optional[Any] = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_SCREAMING_SNAKE_CASE : Dict = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
'The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.' , snake_case__ , )
class A__ ( snake_case__ ):
"""simple docstring"""
def __init__( self , __snake_case ):
super().__init__(__snake_case )
snake_case = config
snake_case = PoolFormerEncoder(__snake_case )
# Initialize weights and apply final processing
self.post_init()
def a_ ( self ):
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(__snake_case )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__snake_case , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def a_ ( self , __snake_case = None , __snake_case = None , __snake_case = None , ):
snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
snake_case = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('''You have to specify pixel_values''' )
snake_case = self.encoder(
__snake_case , output_hidden_states=__snake_case , return_dict=__snake_case , )
snake_case = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=__snake_case , hidden_states=encoder_outputs.hidden_states , )
class A__ ( nn.Module ):
"""simple docstring"""
def __init__( self , __snake_case ):
super().__init__()
snake_case = nn.Linear(config.hidden_size , config.hidden_size )
def a_ ( self , __snake_case ):
snake_case = self.dense(__snake_case )
return output
@add_start_docstrings(
    '\n    PoolFormer Model transformer with an image classification head on top\n    ' , POOLFORMER_START_DOCSTRING , )
class PoolFormerForImageClassification( PoolFormerPreTrainedModel ):
    """simple docstring"""
    def __init__( self , config ):
        super().__init__(config )
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config )
        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1] )
        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
        )
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward( self , pixel_values = None , labels = None , output_hidden_states = None , return_dict = None , ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.poolformer(
            pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict , )
        sequence_output = outputs[0]
        logits = self.classifier(self.norm(sequence_output ).mean([-2, -1] ) )
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = '''regression'''
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = '''single_label_classification'''
                else:
                    self.config.problem_type = '''multi_label_classification'''
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    loss = loss_fct(logits , labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss , logits=logits , hidden_states=outputs.hidden_states )
| 127 | 1 |
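# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original sources). PoolFormer's
# "attention" above is just stride-1 average pooling; subtracting the input
# keeps the residual connection from double-counting the identity. The same
# token-mixing step in isolation, with arbitrary shapes:
import torch
from torch import nn

pool_size = 3
pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)
x = torch.randn(1, 64, 14, 14)   # (batch, channels, height, width)
mixed = pool(x) - x              # what PoolFormerPooling.forward computes
print(mixed.shape)               # torch.Size([1, 64, 14, 14]) - shape preserved
# ---------------------------------------------------------------------------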
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
@property
    def gpu_provider( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
    @property
    def gpu_options( self ):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def lowercase ( self : Dict ):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
        mask_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy''' )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = '''A red cat sitting on a park bench'''
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=15 , generator=generator , output_type='''np''' , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 1e-2
| 40 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
    'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
    'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
    'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
    'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
    'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class RobertaConfig( PretrainedConfig ):
    model_type = """roberta"""
    def __init__( self , vocab_size=50265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig( OnnxConfig ):
    @property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
| 40 | 1 |
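# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original sources). The `inputs` property
# above maps each input name to its dynamic axes; that mapping is what
# torch.onnx.export consumes so batch and sequence lengths stay variable in
# the exported graph. `model` and the output path are placeholders here,
# which is why the export call itself is left commented out.
from collections import OrderedDict
import torch

onnx_inputs = OrderedDict([
    ("input_ids", {0: "batch", 1: "sequence"}),
    ("attention_mask", {0: "batch", 1: "sequence"}),
])
dummy_inputs = tuple(torch.ones(1, 8, dtype=torch.long) for _ in onnx_inputs)
# torch.onnx.export(model, dummy_inputs, "roberta.onnx",
#                   input_names=list(onnx_inputs), dynamic_axes=dict(onnx_inputs))
# ---------------------------------------------------------------------------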
'''simple docstring'''
def decimal_to_fraction(decimal ) -> tuple[int, int]:
    try:
        decimal = float(decimal )
    except ValueError:
        raise ValueError('Please enter a valid number' )
    fractional_part = decimal - int(decimal )
    if fractional_part == 0:
        return int(decimal ), 1
    else:
        number_of_frac_digits = len(str(decimal ).split('.' )[1] )
        numerator = int(decimal * (10**number_of_frac_digits) )
        denominator = 10**number_of_frac_digits
        # Reduce with the Euclidean algorithm: the final divisor is the GCD.
        divisor , dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend , divisor = divisor, remainder
        numerator , denominator = numerator / divisor, denominator / divisor
        return int(numerator ), int(denominator )
if __name__ == "__main__":
print(f'''{decimal_to_fraction(2) = }''')
print(f'''{decimal_to_fraction(8_9.0) = }''')
print(f'''{decimal_to_fraction("67") = }''')
print(f'''{decimal_to_fraction("45.0") = }''')
print(f'''{decimal_to_fraction(1.5) = }''')
print(f'''{decimal_to_fraction("6.25") = }''')
print(f'''{decimal_to_fraction("78td") = }''')
| 208 |
'''simple docstring'''
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]
def create_vector(end_point1 ,end_point2 ) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)
def get_3d_vectors_cross(ab ,ac ) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)
def is_zero_vector(vector ,accuracy ) -> bool:
    return tuple(round(x ,accuracy ) for x in vector ) == (0, 0, 0)
def are_collinear(a ,b ,c ,accuracy = 10 ) -> bool:
    ab = create_vector(a ,b )
    ac = create_vector(a ,c )
    return is_zero_vector(get_3d_vectors_cross(ab ,ac ) ,accuracy )
| 208 | 1 |
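# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original sources). Quick usage check of
# the collinearity test above: points on the line x = y = z give a zero
# AB x AC cross product, while an off-line point does not.
print(are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2)))  # True
print(are_collinear((0, 0, 0), (1, 1, 1), (1, 2, 3)))  # False
# ---------------------------------------------------------------------------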
"""simple docstring"""
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotAudioClassificationPipeline( Pipeline ):
    '''simple docstring'''
    def __init__( self , **kwargs ):
        super().__init__(**kwargs )
        if self.framework != "pt":
            raise ValueError(f'The {self.__class__} is only available in PyTorch.' )
        # No specific FOR_XXX available yet
    def __call__( self , audios , **kwargs ):
        return super().__call__(audios , **kwargs )
    def _sanitize_parameters( self , **kwargs ):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["""candidate_labels"""]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["""hypothesis_template"""]
        return preprocess_params, {}, {}
    def preprocess( self , audio , candidate_labels=None , hypothesis_template="This is a sound of {}." ):
        if isinstance(audio , str ):
            if audio.startswith('''http://''' ) or audio.startswith('''https://''' ):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio ).content
            else:
                with open(audio , '''rb''' ) as f:
                    audio = f.read()
        if isinstance(audio , bytes ):
            audio = ffmpeg_read(audio , self.feature_extractor.sampling_rate )
        if not isinstance(audio , np.ndarray ):
            raise ValueError('''We expect a numpy ndarray as input''' )
        if len(audio.shape ) != 1:
            raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''' )
        inputs = self.feature_extractor(
            [audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors='''pt''' )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward( self , model_inputs ):
        candidate_labels = model_inputs.pop('''candidate_labels''' )
        text_inputs = model_inputs.pop('''text_inputs''' )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs )
        model_outputs = {
            """candidate_labels""": candidate_labels,
            """logits""": outputs.logits_per_audio,
        }
        return model_outputs
    def postprocess( self , model_outputs ):
        candidate_labels = model_outputs.pop('''candidate_labels''' )
        logits = model_outputs["""logits"""][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=0 )
            scores = probs.tolist()
        else:
            raise ValueError('''`tf` framework not supported.''' )
        result = [
            {"""score""": score, """label""": candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
        ]
        return result
| 370 |
"""simple docstring"""
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(accelerator: Accelerator , dataset: DatasetDict , train_idxs: List[int] , valid_idxs: List[int] , batch_size: int = 16 ):
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    datasets = DatasetDict(
        {
            '''train''': dataset['''train'''].select(train_idxs ),
            '''validation''': dataset['''train'''].select(valid_idxs ),
            '''test''': dataset['''validation'''],
        } )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding='''longest''' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='''pt''' , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    test_dataloader = DataLoader(
        tokenized_datasets['''test'''] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config , args ):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset('''glue''' , '''mrpc''' )
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds ) )
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    metric = evaluate.load('''glue''' , '''mrpc''' )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed )
    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets['''train'''].num_rows ) , datasets['''train''']['''label'''] )
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds ):
        train_dataloader , eval_dataloader , test_dataloader = get_fold_dataloaders(
            accelerator , datasets , train_idxs , valid_idxs , )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=True )
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device )
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters() , lr=lr )
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
            model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
        # Now we train the model
        for epoch in range(num_epochs ):
            model.train()
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                outputs = model(**batch )
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    outputs = model(**batch )
                predictions = outputs.logits.argmax(dim=-1 )
                predictions , references = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
                metric.add_batch(
                    predictions=predictions , references=references , )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(F'epoch {epoch}:' , eval_metric )
        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits
            predictions , references = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
            fold_predictions.append(predictions.cpu() )
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu() )
        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions , dim=0 ) )
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references , dim=0 )
    preds = torch.stack(test_predictions , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
    test_metric = metric.compute(predictions=preds , references=test_references )
    accelerator.print('''Average test metrics from all folds:''' , test_metric )
def main():
    parser = argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''' , type=str , default=None , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''' , )
    parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
    # New Code #
    parser.add_argument('''--num_folds''' , type=int , default=3 , help='''The number of splits to perform across the dataset''' )
    args = parser.parse_args()
    config = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 68 | 0 |
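# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original sources). The end of the
# cross-validation script above ensembles the k folds by stacking each fold's
# test logits, averaging them, and taking the argmax. The same step in
# isolation, with random stand-in logits:
import torch

n_folds, n_examples, n_classes = 3, 5, 2
fold_logits = [torch.randn(n_examples, n_classes) for _ in range(n_folds)]
ensembled = torch.stack(fold_logits, dim=0).sum(dim=0).div(n_folds)
predictions = ensembled.argmax(dim=-1)
print(predictions.shape)  # torch.Size([5]) - one ensembled label per example
# ---------------------------------------------------------------------------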
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = '''src/diffusers'''
# Matches is_xxx_available()
_re_backend = re.compile(r'''is\_([a-z_]*)_available\(\)''')
# Matches from xxx import bla
_re_single_line_import = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
DUMMY_CONSTANT = '''
{0} = None
'''
DUMMY_CLASS = '''
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
'''
DUMMY_FUNCTION = '''
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
'''
def find_backend(line ):
    # Find one (or multiple) backends in a line of the __init__.
    backends = _re_backend.findall(line )
    if len(backends ) == 0:
        return None
    return "_and_".join(backends )
def read_init():
    with open(os.path.join(PATH_TO_DIFFUSERS , '''__init__.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines ):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index] )
        if backend is not None:
            while not lines[line_index].startswith('''else:''' ):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines ) and len(lines[line_index] ) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
                elif line.startswith(''' ''' * 8 ):
                    objects.append(line[8:-2] )
                line_index += 1
            if len(objects ) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object(name , backend_name ):
    if name.isupper():
        return DUMMY_CONSTANT.format(name )
    elif name.islower():
        return DUMMY_FUNCTION.format(name , backend_name )
    else:
        return DUMMY_CLASS.format(name , backend_name )
def create_dummy_files(backend_specific_objects=None ):
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = '[' + ', '.join(f"\"{b}\"" for b in backend.split('''_and_''' ) ) + ']'
        dummy_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o , backend_name ) for o in objects] )
        dummy_files[backend] = dummy_file
    return dummy_files
def check_dummies(overwrite=False ):
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {'torch': 'pt'}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS , '''utils''' )
    dummy_file_paths = {
        backend: os.path.join(path , f"dummy_{short_names.get(backend , backend )}_objects.py" )
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path ):
            with open(file_path , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ''
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py as the main "
                    '''__init__ has new objects.''' )
                with open(dummy_file_paths[backend] , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
                    f.write(dummy_files[backend] )
            else:
                raise ValueError(
                    '''The main __init__ has objects that are not present in '''
                    f"diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py. Run `make fix-copies` "
                    '''to fix this.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 264 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
    """microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class MarkupLMConfig( PretrainedConfig ):
    model_type = 'markuplm'
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , bos_token_id=0 , eos_token_id=2 , max_xpath_tag_unit_embeddings=2_56 , max_xpath_subs_unit_embeddings=10_24 , tag_pad_id=2_16 , subs_pad_id=10_01 , xpath_unit_hidden_size=32 , max_depth=50 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 86 | 0 |
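# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original sources). What the dummy files
# generated by check_dummies above do at runtime: any use of the class raises
# an error naming the missing backend. This is a simplified stand-in for
# diffusers' DummyObject/requires_backends pair; the generated class name
# below is hypothetical.
class DummyObject(type):
    """Metaclass so attribute access on the class itself also fails loudly."""
    def __getattr__(cls, key):
        raise ImportError(f"{cls.__name__} requires the backends {cls._backends}")

class FlaxOnlyModel(metaclass=DummyObject):  # hypothetical generated class
    _backends = ["flax"]
    def __init__(self, *args, **kwargs):
        raise ImportError(f"{type(self).__name__} requires the backends {self._backends}")

try:
    FlaxOnlyModel()
except ImportError as err:
    print(err)  # FlaxOnlyModel requires the backends ['flax']
# ---------------------------------------------------------------------------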
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    """simple docstring"""
    def __init__( self , parent , batch_size=2 , image_size=32 , patch_size=16 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=4 , backbone_out_indices=[0, 1, 2, 3] , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , num_labels=3 , backbone_featmap_shape=[1, 384, 24, 24] , is_hybrid=True , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        '''simple docstring'''
        backbone_config = {
            """global_padding""": """same""",
            """layer_type""": """bottleneck""",
            """depths""": [3, 4, 9],
            """out_features""": ["""stage1""", """stage2""", """stage3"""],
            """embedding_dynamic_padding""": True,
            """hidden_sizes""": [96, 192, 384, 768],
            """num_groups""": 2,
        }
        return DPTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=backbone_config , backbone_featmap_shape=self.backbone_featmap_shape , )
    def create_and_check_model( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = DPTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_depth_estimation( self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            """depth-estimation""": DPTForDepthEstimation,
            """feature-extraction""": DPTModel,
            """image-segmentation""": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = DPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DPTConfig , has_text_modality=False , hidden_size=37 )
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
pass
    def lowerCamelCase__ ( self : Optional[int] ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def lowerCamelCase__ ( self : str ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def lowerCamelCase__ ( self : Tuple ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def lowerCamelCase__ ( self : Optional[Any] ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs )
    def lowerCamelCase__ ( self : Tuple ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
pass
@slow
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__UpperCAmelCase : List[str] = DPTModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase ,__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Optional[Any] = """add"""
with self.assertRaises(_A ):
__UpperCAmelCase : List[Any] = DPTForDepthEstimation(_A )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
| 364 |
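The integration test above pins an expected depth slice; the same checkpoint can be exercised outside the harness in a few lines. A minimal sketch, assuming network access to the `Intel/dpt-hybrid-midas` checkpoint used above; the bicubic upsampling step is an illustrative post-processing choice, not something the test performs:

```python
import torch
from PIL import Image
from transformers import DPTForDepthEstimation, DPTImageProcessor

processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    predicted_depth = model(**inputs).predicted_depth  # (1, 384, 384), relative depth

# resize the relative depth map back to the original image resolution
depth = torch.nn.functional.interpolate(
    predicted_depth.unsqueeze(1),
    size=image.size[::-1],  # PIL size is (width, height)
    mode="bicubic",
    align_corners=False,
).squeeze()
print(depth.shape, depth.min().item(), depth.max().item())
```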
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
os.environ['XLA_PYTHON_CLIENT_ALLOCATOR'] = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        # extend the attention mask out to the full cache length with zeros
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
        tgt_text = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 320 | 0 |
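A minimal standalone sketch of the summarization path the slow test above exercises, assuming the `google/pegasus-xsum` checkpoint is reachable; the beam size mirrors the test, everything else is illustrative:

```python
from transformers import FlaxPegasusForConditionalGeneration, PegasusTokenizer

model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

article = (
    "PG&E stated it scheduled the blackouts in response to forecasts for high "
    "winds amid dry conditions."
)
inputs = tokenizer([article], return_tensors="np", truncation=True, max_length=512, padding=True)

# beam search returns token ids as numpy arrays under `.sequences`
summary_ids = model.generate(**inputs, num_beams=2).sequences
print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True)[0])
```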
'''simple docstring'''
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "attention_mask"]
    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 16_000,
        padding_value: float = 0.0,
        do_normalize: bool = False,
        num_mel_bins: int = 80,
        hop_length: int = 16,
        win_length: int = 64,
        win_function: str = "hann_window",
        frame_signal_scale: float = 1.0,
        fmin: float = 80,
        fmax: float = 7_600,
        mel_floor: float = 1e-10,
        reduction_factor: int = 2,
        return_attention_mask: bool = True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        self.sample_size = win_length * sampling_rate // 1_000
        self.sample_stride = hop_length * sampling_rate // 1_000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.num_mel_bins,
            min_frequency=self.fmin,
            max_frequency=self.fmax,
            sampling_rate=self.sampling_rate,
            norm='slaney',
            mel_scale='slaney',
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers',
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers',
                FutureWarning,
            )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values
    def _extract_mel_features(self, one_waveform: np.ndarray) -> np.ndarray:
        """Extract log-mel filterbank features for one (unbatched) waveform."""
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel='log10',
        )
        return log_mel_spec.T
    def __call__(
        self,
        audio: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        audio_target: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if audio is None and audio_target is None:
            raise ValueError('You must provide either `audio` or `audio_target` values.')

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    f""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
                    f""" {self.sampling_rate} and not {sampling_rate}.""")
        else:
            logger.warning(
                'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.')

        if audio is not None:
            inputs = self._process_audio(
                audio, False, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target, True, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs)

            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get('attention_mask')
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def _process_audio(
        self,
        speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        is_target: bool = False,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(s, dtype=np.float32) for s in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({'input_values': features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({'input_values': speech})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs['input_values']
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs['input_values'] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs['input_values'] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs['input_values'] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get('attention_mask')
        if attention_mask is not None:
            padded_inputs['attention_mask'] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs['input_values'] = self.zero_mean_unit_var_norm(
                padded_inputs['input_values'], attention_mask=attention_mask, padding_value=self.padding_value)

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs']
        for name in names:
            if name in output:
                del output[name]

        return output
| 151 |
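A short usage sketch for the feature extractor defined above, assuming it is the SpeechT5 extractor shipped in `transformers`; the synthetic sine waveform and printed shapes are illustrative:

```python
import numpy as np
from transformers import SpeechT5FeatureExtractor

feature_extractor = SpeechT5FeatureExtractor()

# one second of a 440 Hz tone at the default 16 kHz sampling rate
t = np.linspace(0, 1, 16_000, endpoint=False)
waveform = (0.1 * np.sin(2 * np.pi * 440 * t)).astype(np.float32)

# raw waveform inputs for the encoder side
inputs = feature_extractor(audio=waveform, sampling_rate=16_000, return_tensors="np")
print(inputs["input_values"].shape)  # (1, 16000)

# log-mel spectrogram targets for the decoder side
targets = feature_extractor(audio_target=waveform, sampling_rate=16_000, return_tensors="np")
print(targets["input_values"].shape)  # (1, num_frames, 80)
```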
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device)
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, _, _ = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)
    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)
    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1))

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels)

        comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': MaskFormerModel, 'image-segmentation': MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_maskformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason='MaskFormer does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='MaskFormer does not have a get_input_embeddings method')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='MaskFormer is not a generative model')
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason='MaskFormer does not use token embeddings')
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason='MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`')
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            'pixel_values': torch.randn((2, 3, *size), device=torch_device),
            'mask_labels': torch.randn((2, 10, *size), device=torch_device),
            'class_labels': torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)
    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, _, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, _, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco')
            if is_vision_available()
            else None
        )
    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors='pt').to(torch_device)
        inputs_shape = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1_088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))

        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))

        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))
    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco')
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors='pt').to(torch_device)
        inputs_shape = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1_088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff')
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors='pt').to(torch_device)
        inputs_shape = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1_088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco')
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1_333)), np.zeros((3, 800, 1_333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors='pt',
        )

        inputs['pixel_values'] = inputs['pixel_values'].to(torch_device)
        inputs['mask_labels'] = [el.to(torch_device) for el in inputs['mask_labels']]
        inputs['class_labels'] = [el.to(torch_device) for el in inputs['class_labels']]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
| 151 | 1 |
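A minimal inference-plus-post-processing sketch matching the checkpoint used in the integration tests above; `post_process_semantic_segmentation` collapses the per-query class and mask logits into a single label map (paths and prints are illustrative):

```python
import torch
from PIL import Image
from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# one (height, width) tensor of class ids per input image
semantic_map = processor.post_process_semantic_segmentation(
    outputs, target_sizes=[image.size[::-1]]
)[0]
print(semantic_map.shape, semantic_map.unique())
```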
"""simple docstring"""
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
    if days_between_payments <= 0:
        raise ValueError('days_between_payments must be > 0')
    if daily_interest_rate < 0:
        raise ValueError('daily_interest_rate must be >= 0')
    if principal <= 0:
        raise ValueError('principal must be > 0')
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError('number_of_compounding_periods must be > 0')
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError('nominal_annual_interest_rate_percentage must be >= 0')
    if principal <= 0:
        raise ValueError('principal must be > 0')
    # returns the interest earned, not the final balance
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    if number_of_years <= 0:
        raise ValueError('number_of_years must be > 0')
    if nominal_annual_percentage_rate < 0:
        raise ValueError('nominal_annual_percentage_rate must be >= 0')
    if principal <= 0:
        raise ValueError('principal must be > 0')
    # compound the nominal rate daily over the whole term
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 175 |
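A short worked example of the three helpers above; the figures follow directly from the formulas (all three return interest earned, not the final balance):

```python
# 31 days of 0.05% daily simple interest on a 10,000 principal
print(simple_interest(10_000, 0.0005, 31))  # 155.0

# interest earned after 3 yearly compounding periods at 5%:
# 10_000 * ((1.05 ** 3) - 1)
print(compound_interest(10_000, 0.05, 3))  # ~1576.25

# APR compounds the nominal 5% rate daily over the 3-year term
print(apr_interest(10_000, 0.05, 3))  # ~1618.2
```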
"""simple docstring"""
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Check the substring-divisibility property of Project Euler 43 for a digit tuple."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all 0-9 pandigital numbers with the substring-divisibility property."""
    return sum(
        int(''.join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 175 | 1 |
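`1406357289` is the example of the property given in the Project Euler 43 statement, which makes a handy sanity check for `is_substring_divisible`:

```python
digits = tuple(int(c) for c in "1406357289")
# d4 = 6 is even, d3+d4+d5 = 9 is divisible by 3, d6 = 5 is divisible by 5,
# and 357, 572, 728, 289 are divisible by 7, 11, 13, 17 respectively
print(is_substring_divisible(digits))  # True
print(solution())  # sums all such 0-9 pandigital numbers
```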
def twos_complement(number: int) -> str:
    """Return the two's complement binary string of a non-positive integer."""
    if number > 0:
        raise ValueError('input must be a negative integer')
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            '1'
            + '0' * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else '0'
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
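A worked trace of the routine above on a small input: for -5, `bin(-5)[3:]` is `"101"` (3 magnitude bits), `abs(-5) - (1 << 3)` is -3 whose magnitude bits are `"11"`, and prepending the sign bit plus zero padding yields the 4-bit pattern `1011`:

```python
print(twos_complement(-5))   # 0b1011
print(twos_complement(-1))   # 0b1
print(twos_complement(-17))  # 0b101111
```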
| 90 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = 'Hello, World!'
SAMPLE_LANGUAGE = 'en_XX'
def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=5_14,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight)  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias
    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias

        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias
    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"""max_absolute_diff = {max_absolute_diff}""")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"""Saving model to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
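    # Example invocation of the converter above (the checkpoint path is a
    # placeholder for a real fairseq X-MOD dump, not an actual file):
    #
    #   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
    #       --xmod_checkpoint_path ./xmod.base.81.1M.pt \
    #       --pytorch_dump_folder_path ./hf-xmod-base \
    #       --classification_head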
| 219 | 0 |
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
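
# A minimal usage sketch of the helpers above (the matmul and sizes are
# illustrative; assumes a CUDA build of PyTorch, since start_measure resets the
# CUDA peak-memory statistics):
#
#     start = start_measure()
#     x = torch.randn(2048, 2048)
#     y = x @ x
#     measures = end_measure(start)
#     log_measures(measures, "2048x2048 matmul")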
| 367 |
"""simple docstring"""
def equated_monthly_installments(
    principal: float, rate_per_annum: float, years_to_repay: int
) -> float:
    """
    Formula for amortization amount per month:
    A = p * r * (1 + r)^n / ((1 + r)^n - 1)
    where p is the principal, r is the rate of interest per month
    and n is the number of payments
    """
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
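    # Illustrative spot check (hypothetical numbers, not from the original file):
    # borrowing 25,000 at 12% annual interest over 3 years costs ~830.36 a month,
    # since the monthly rate is 0.01 and (1.01 ** 36 - 1) appears in the denominator.
    print(equated_monthly_installments(25_000, 0.12, 3))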
| 56 | 0 |
'''simple docstring'''
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return the word with its letters sorted."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every anagram of the given word."""
    return word_by_signature[signature(my_word)]
data = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams))
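    # Self-contained illustration of the signature-grouping idea above (the demo
    # words are hypothetical and independent of words.txt):
    demo = collections.defaultdict(list)
    for demo_word in ("listen", "silent", "enlist", "google"):
        demo[signature(demo_word)].append(demo_word)
    print(demo["eilnst"])  # ['listen', 'silent', 'enlist']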
| 250 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
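        # Worked example with the defaults above: num_patches = (30 // 2) ** 2 = 225,
        # so seq_length = ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91.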
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
def __lowerCamelCase ( self :Any ):
snake_case__ : Union[str, Any] = TFViTMAEModelTester(self )
snake_case__ : Dict = ConfigTester(self ,config_class=__lowercase ,has_text_modality=__lowercase ,hidden_size=3_7 )
def __lowerCamelCase ( self :Optional[int] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def __lowerCamelCase ( self :Dict ):
pass
def __lowerCamelCase ( self :Tuple ):
snake_case__ , snake_case__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Union[str, Any] = model_class(__lowercase )
self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) )
snake_case__ : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowercase ,tf.keras.layers.Layer ) )
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : str = model_class(__lowercase )
snake_case__ : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Tuple = [*signature.parameters.keys()]
snake_case__ : Dict = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,__lowercase )
def __lowerCamelCase ( self :str ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def __lowerCamelCase ( self :List[Any] ):
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowercase )
def __lowerCamelCase ( self :int ):
# make the mask reproducible
np.random.seed(2 )
snake_case__ , snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Any = int((config.image_size // config.patch_size) ** 2 )
snake_case__ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case__ : List[str] = model_class(__lowercase )
snake_case__ : Union[str, Any] = self._prepare_for_class(__lowercase ,__lowercase )
snake_case__ : Any = model(__lowercase ,noise=__lowercase )
snake_case__ : Optional[int] = copy.deepcopy(self._prepare_for_class(__lowercase ,__lowercase ) )
snake_case__ : List[Any] = model(**__lowercase ,noise=__lowercase )
snake_case__ : Optional[Any] = outputs_dict[0].numpy()
snake_case__ : Optional[Any] = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) ,1e-6 )
def __lowerCamelCase ( self :Optional[Any] ):
# make the mask reproducible
np.random.seed(2 )
snake_case__ , snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : List[str] = int((config.image_size // config.patch_size) ** 2 )
snake_case__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(__lowercase :Dict ):
snake_case__ : Any = {}
for k, v in inputs_dict.items():
if tf.is_tensor(__lowercase ):
snake_case__ : Dict = v.numpy()
else:
snake_case__ : str = np.array(__lowercase )
return inputs_np_dict
for model_class in self.all_model_classes:
snake_case__ : str = model_class(__lowercase )
snake_case__ : List[Any] = self._prepare_for_class(__lowercase ,__lowercase )
snake_case__ : Dict = prepare_numpy_arrays(__lowercase )
snake_case__ : Tuple = model(__lowercase ,noise=__lowercase )
snake_case__ : Dict = model(**__lowercase ,noise=__lowercase )
self.assert_outputs_same(__lowercase ,__lowercase )
def __lowerCamelCase ( self :Optional[Any] ,__lowercase :str ,__lowercase :Optional[Any] ,__lowercase :List[str] ):
# make masks reproducible
np.random.seed(2 )
snake_case__ : Union[str, Any] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
snake_case__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
snake_case__ : Any = tf.constant(__lowercase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
snake_case__ : Optional[Any] = tf_noise
super().check_pt_tf_models(__lowercase ,__lowercase ,__lowercase )
def __lowerCamelCase ( self :Dict ):
# make mask reproducible
np.random.seed(2 )
snake_case__ , snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : List[str] = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(__lowercase )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(__lowercase ,__lowercase ),)
if isinstance(__lowercase ,__lowercase )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(__lowercase ,'''_keras_serializable''' ,__lowercase )
}
snake_case__ : Optional[Any] = int((config.image_size // config.patch_size) ** 2 )
snake_case__ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
snake_case__ : Any = tf.convert_to_tensor(__lowercase )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
snake_case__ : List[Any] = main_layer_class(__lowercase )
snake_case__ : Union[str, Any] = {
name: tf.keras.Input(tensor.shape[1:] ,dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
snake_case__ : Optional[Any] = tf.keras.Model(__lowercase ,outputs=main_layer(__lowercase ) )
snake_case__ : List[str] = model(__lowercase )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ : List[str] = os.path.join(__lowercase ,'''keras_model.h5''' )
model.save(__lowercase )
snake_case__ : List[str] = tf.keras.models.load_model(
__lowercase ,custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(__lowercase ,tf.keras.Model )
snake_case__ : Union[str, Any] = model(__lowercase )
self.assert_outputs_same(__lowercase ,__lowercase )
@slow
def __lowerCamelCase ( self :Any ):
# make mask reproducible
np.random.seed(2 )
snake_case__ , snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : str = int((config.image_size // config.patch_size) ** 2 )
snake_case__ : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case__ : Optional[int] = model_class(__lowercase )
snake_case__ : str = self._prepare_for_class(__lowercase ,__lowercase )
snake_case__ : List[Any] = model(__lowercase ,noise=__lowercase )
if model_class.__name__ == "TFViTMAEModel":
snake_case__ : List[Any] = outputs.last_hidden_state.numpy()
snake_case__ : List[Any] = 0
else:
snake_case__ : Any = outputs.logits.numpy()
snake_case__ : Tuple = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowercase ,saved_model=__lowercase )
snake_case__ : Optional[Any] = model_class.from_pretrained(__lowercase )
snake_case__ : Any = model(__lowercase ,noise=__lowercase )
if model_class.__name__ == "TFViTMAEModel":
snake_case__ : Dict = after_outputs['''last_hidden_state'''].numpy()
snake_case__ : List[Any] = 0
else:
snake_case__ : Any = after_outputs['''logits'''].numpy()
snake_case__ : Optional[Any] = 0
snake_case__ : int = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__lowercase ,1e-5 )
def __lowerCamelCase ( self :int ):
# make mask reproducible
np.random.seed(2 )
snake_case__ , snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Dict = int((config.image_size // config.patch_size) ** 2 )
snake_case__ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case__ : int = model_class(__lowercase )
snake_case__ : int = self._prepare_for_class(__lowercase ,__lowercase )
snake_case__ : List[Any] = model(__lowercase ,noise=__lowercase )
snake_case__ : int = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(__lowercase )
snake_case__ : Any = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
snake_case__ : Optional[int] = model_class.from_config(model.config )
snake_case__ : Tuple = new_model(__lowercase ) # Build model
new_model.set_weights(model.get_weights() )
snake_case__ : List[Any] = new_model(__lowercase ,noise=__lowercase )
self.assert_outputs_same(__lowercase ,__lowercase )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def __lowerCamelCase ( self :List[Any] ):
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def __lowerCamelCase ( self :Tuple ):
pass
@slow
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : List[Any] = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(__lowercase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None
@slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
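        # For the ViTMAEConfig defaults this is (224 // 16) ** 2 = 196 patches, which
        # is why the logits verified below are expected to have shape (1, 196, 768).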
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        outputs = model(**inputs, noise=noise)

        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )
        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
| 230 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
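# Illustrative usage of the config class above (the relative imports mean this
# module runs inside the transformers package rather than standalone):
#
#     config = Data2VecVisionConfig(image_size=384)
#     config.image_size   # 384
#     config.model_type   # "data2vec-vision"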
| 3 |
'''simple docstring'''
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.previous = None
        node.next = None

    def is_empty(self):
        return self.head is None


def create_linked_list() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
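    # Illustrative walk-through of the list API above:
    linked_list = LinkedList()
    for value in (1, 2, 3):
        linked_list.insert(value)
    print(linked_list)       # 1 2 3
    linked_list.delete_value(2)
    print(linked_list)       # 1 3
    print(2 in linked_list)  # False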
| 3 | 1 |
"""simple docstring"""
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
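
# Illustrative behaviour sketch. This assumes the sibling hash_table.HashTable
# exposes an insert_data method and a modulo hash function, which is an
# assumption about that module rather than something shown here:
#
#     ht = HashTableWithLinkedList(3)
#     ht.insert_data(17)  # bucket 17 % 3 == 2 -> deque([17])
#     ht.insert_data(20)  # bucket 20 % 3 == 2 -> deque([20, 17]) (appendleft)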
| 84 |
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_TASK_GUIDES = 'docs/source/en/tasks'
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task(task_guide):
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )

    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
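
# Typical invocations from the repository root, matching the argparse setup above:
#
#     python utils/check_task_guides.py                       # check only; raises on mismatch
#     python utils/check_task_guides.py --fix_and_overwrite   # rewrite the task-guide model lists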
| 84 | 1 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")

        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")

        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
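
# The replicate/shard pattern above is the standard jax.pmap data-parallel setup:
# `replicate` copies the pipeline parameters to every device (adding a leading
# device axis), while `shard` splits the batch so each device sees
# batch_size / device_count examples. A minimal illustration:
#
#     batch = jnp.zeros((jax.device_count() * 2, 77), dtype=jnp.int32)
#     sharded = shard(batch)  # shape (device_count, 2, 77)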
| 354 |
"""simple docstring"""
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
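# BLINKER oscillates with period 2: the vertical bar of three live cells becomes
# a horizontal bar on the next generation, then flips back.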
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
| 54 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
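    # Worked trace for the scores above (height = log2(8) = 3.0):
    #   depth 2 (max): max(90, 23)=90, max(6, 33)=33, max(21, 65)=65, max(123, 34423)=34423
    #   depth 1 (min): min(90, 33)=33, min(65, 34423)=65
    #   root  (max):  max(33, 65)=65 -> prints "Optimal value : 65"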
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 217 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"roberta-base": 512,
"roberta-large": 512,
"roberta-large-mnli": 512,
"distilroberta-base": 512,
"roberta-base-openai-detector": 512,
"roberta-large-openai-detector": 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer
    def __init__(
        self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>",
        eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>",
        mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets, **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. it includes the space
        # before it, so we set lstrip to True.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
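
# Typical round trip (the ids shown are for the roberta-base vocabulary; fetching
# the pretrained files needs network access):
#
#     tok = RobertaTokenizerFast.from_pretrained("roberta-base")
#     ids = tok("Hello world")["input_ids"]  # [0, 31414, 232, 2]
#     tok.decode(ids)                        # '<s>Hello world</s>'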
| 217 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = 'vit_mae'

    def __init__(
        self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072,
        hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02,
        layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True,
        decoder_num_attention_heads=16, decoder_hidden_size=512, decoder_num_hidden_layers=8,
        decoder_intermediate_size=2_048, mask_ratio=0.75, norm_pix_loss=False, **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
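        # With the defaults above (image_size=224, patch_size=16, mask_ratio=0.75),
        # the encoder only sees 25% of the (224 // 16) ** 2 = 196 patches, i.e. 49.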
| 299 |
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(accelerator, dataset, train_idxs, valid_idxs, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = DatasetDict(
        {
            'train': dataset['train'].select(train_idxs),
            'validation': dataset['train'].select(valid_idxs),
            'test': dataset['validation'],
        }
    )
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'],
        )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding='longest', max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt',
        )
# Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    test_dataloader = DataLoader(
        tokenized_datasets['test'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
# New Code #
UpperCAmelCase__ : List[str] = []
# Download the dataset
UpperCAmelCase__ : Union[str, Any] = load_dataset('''glue''' , '''mrpc''' )
# Create our splits
UpperCAmelCase__ : str = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
UpperCAmelCase__ : Dict = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase__ : Any = config['''lr''']
UpperCAmelCase__ : Any = int(config['''num_epochs'''] )
UpperCAmelCase__ : Any = int(config['''seed'''] )
UpperCAmelCase__ : Dict = int(config['''batch_size'''] )
UpperCAmelCase__ : Any = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
UpperCAmelCase__ : Optional[Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
UpperCAmelCase__ : Any = batch_size // MAX_GPU_BATCH_SIZE
UpperCAmelCase__ : List[Any] = MAX_GPU_BATCH_SIZE
set_seed(lowerCAmelCase__ )
# New Code #
# Create our folds:
UpperCAmelCase__ : Union[str, Any] = kfold.split(np.zeros(datasets['''train'''].num_rows ) , datasets['''train''']['''label'''] )
UpperCAmelCase__ : Dict = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(lowerCAmelCase__ ):
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = get_fold_dataloaders(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase__ : List[str] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=lowerCAmelCase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase__ : Optional[Any] = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase__ : Union[str, Any] = AdamW(params=model.parameters() , lr=lowerCAmelCase__ )
# Instantiate scheduler
UpperCAmelCase__ : Any = get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase__ , num_warmup_steps=1_00 , num_training_steps=(len(lowerCAmelCase__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Now we train the model
for epoch in range(lowerCAmelCase__ ):
model.train()
for step, batch in enumerate(lowerCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCAmelCase__ : Union[str, Any] = model(**lowerCAmelCase__ )
UpperCAmelCase__ : Dict = outputs.loss
UpperCAmelCase__ : Dict = loss / gradient_accumulation_steps
accelerator.backward(lowerCAmelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase__ : str = model(**lowerCAmelCase__ )
UpperCAmelCase__ : Any = outputs.logits.argmax(dim=-1 )
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=lowerCAmelCase__ , references=lowerCAmelCase__ , )
UpperCAmelCase__ : str = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , lowerCAmelCase__ )
# New Code #
# We also run predictions on the test set at the very end
UpperCAmelCase__ : int = []
for step, batch in enumerate(lowerCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase__ : str = model(**lowerCAmelCase__ )
UpperCAmelCase__ : Union[str, Any] = outputs.logits
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(lowerCAmelCase__ , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
UpperCAmelCase__ : Union[str, Any] = torch.cat(lowerCAmelCase__ , dim=0 )
UpperCAmelCase__ : Tuple = torch.stack(lowerCAmelCase__ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
UpperCAmelCase__ : Optional[Any] = metric.compute(predictions=lowerCAmelCase__ , references=lowerCAmelCase__ )
accelerator.print('''Average test metrics from all folds:''' , lowerCAmelCase__ )
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
    main()
| 299 | 1 |
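The fold-ensembling step above (stack, sum, divide by the fold count, argmax) is plain soft voting over per-fold logits. A minimal, self-contained sketch of that arithmetic, with made-up logits for three folds and a binary task:

import torch

# Each fold contributes test-set logits; summing across folds and dividing
# before the argmax is equivalent to averaging the logits per example.
fold_logits = [torch.randn(8, 2) for _ in range(3)]   # 3 folds, 8 examples, 2 classes
stacked = torch.stack(fold_logits, dim=0)             # (num_folds, num_examples, num_classes)
ensembled = stacked.sum(dim=0).div(len(fold_logits))  # average logits across folds
predictions = ensembled.argmax(dim=-1)                # final class per example
print(predictions.shape)  # torch.Size([8])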
import warnings

from ..trainer import Trainer
from ..utils import logging


logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 310 |
from datetime import datetime

import requests


def download_video(url: str) -> bytes:
    """Resolve an Instagram video/IGTV URL to its source file and return the raw bytes."""
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
| 51 | 0 |
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger


logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file are missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and, optionally, the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size):
    """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
| 362 |
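The chunked-hash pattern in `get_size_checksum_dict` generalizes to any large file: reading in 1 MiB pieces keeps memory flat even for multi-gigabyte inputs. A standalone sketch using only the standard library (the temp file and its contents are made up for illustration):

import hashlib
import os
import tempfile

def size_and_checksum(path: str) -> dict:
    m = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 << 20 == 1 MiB
            m.update(chunk)
    return {"num_bytes": os.path.getsize(path), "checksum": m.hexdigest()}

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b"hello world")
print(size_and_checksum(tmp.name))
# {'num_bytes': 11, 'checksum': 'b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9'}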
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 28 | 0 |
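For context, this is roughly how such a processor is driven end to end. A usage sketch, assuming the public `Salesforce/blip-image-captioning-base` checkpoint, network access, and Pillow installed:

import requests
from PIL import Image
from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Image + text in one call: the output combines pixel_values with the
# tokenizer's input_ids/attention_mask (token_type_ids are disabled).
inputs = processor(images=image, text="a photo of", return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']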
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    """
    Calculate the present value of a stream of yearly cash flows discounted
    at a fixed rate: PV = sum(cash_flow_i / (1 + rate) ** i).
    """
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 158 |
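A quick worked example of `present_value` with made-up numbers, noting that the year-0 cash flow is not discounted because `enumerate` starts the exponent at 0:

# 100/1.1**0 + 100/1.1**1 + 100/1.1**2 = 100 + 90.909... + 82.644... ≈ 273.55
print(present_value(0.10, [100.0, 100.0, 100.0]))  # 273.55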
from dataclasses import dataclass
from typing import Optional

import torch
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin


@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, activation_fn: str = "geglu", norm_elementwise_affine: bool = True, double_self_attention: bool = True) -> None:
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, attention_bias=attention_bias, double_self_attention=double_self_attention, norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, num_frames=1, cross_attention_kwargs=None, return_dict: bool = True) -> TransformerTemporalModelOutput:
        # 1. Input: (batch * frames, channel, height, width) -> (batch * height * width, frames, channel)
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels,
            )

        # 3. Output: project back, undo the reshape, then add the residual
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual
        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
| 158 | 1 |
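The forward pass above hinges on one reshape: spatial positions are folded into the batch dimension so attention runs across frames only. A minimal sketch with toy sizes (made up: batch=2, frames=4, channel=8, height=width=3), verifying the round trip is lossless:

import torch

batch, frames, channel, height, width = 2, 4, 8, 3, 3
x = torch.randn(batch * frames, channel, height, width)

# Forward direction: per-pixel temporal sequences of shape (frames, channel).
seq = x.reshape(batch, frames, channel, height, width)
seq = seq.permute(0, 3, 4, 1, 2).reshape(batch * height * width, frames, channel)
print(seq.shape)  # torch.Size([18, 4, 8])

# Inverse direction: recover the original (batch * frames, C, H, W) layout.
back = seq.reshape(batch, height, width, frames, channel).permute(0, 3, 4, 1, 2)
back = back.reshape(batch * frames, channel, height, width)
print(torch.equal(x, back))  # True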
"""Implementation of the MD5 message-digest algorithm (RFC 1321)."""
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-bit string to little-endian byte order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Convert a non-negative integer to little-endian hex bytes."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Pad the message to a bit length that is a multiple of 512."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the padded bit string into 512-bit blocks of 16 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT of a 32-bit integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Left-rotate a 32-bit integer by `shift` bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Return the 128-bit MD5 digest of `message` as 32 hex characters."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 104 |
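A quick sanity check of `md5_me` against the standard library; both should agree on the canonical empty-string digest:

import hashlib

print(md5_me(b""))                   # b'd41d8cd98f00b204e9800998ecf8427e'
print(hashlib.md5(b"").hexdigest())  # 'd41d8cd98f00b204e9800998ecf8427e'
print(md5_me(b"") == hashlib.md5(b"").hexdigest().encode("utf-8"))  # True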
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
a_ : List[Any] = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[str] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
"GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXForCausalLM",
"GPTNeoXForQuestionAnswering",
"GPTNeoXForSequenceClassification",
"GPTNeoXForTokenClassification",
"GPTNeoXLayer",
"GPTNeoXModel",
"GPTNeoXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 104 | 1 |
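The `_LazyModule` registration above defers each submodule import until one of its symbols is first accessed. A simplified sketch of that idea (not the transformers implementation; the class and attribute names here are illustrative):

import importlib
import sys
from types import ModuleType

class LazyModule(ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, name: str):
        # Only called when normal attribute lookup fails, i.e. on first access.
        if name not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._symbol_to_module[name]}")
        value = getattr(submodule, name)
        setattr(self, name, value)  # cache so __getattr__ only runs once per symbol
        return value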