code (string, 86–54.5k chars) | code_codestyle (int64, 0–371) | style_context (string, 87–49.2k chars) | style_context_codestyle (int64, 0–349) | label (int64, 0–1)
|---|---|---|---|---|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_albert''': ['''ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''AlbertConfig''', '''AlbertOnnxConfig'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''tokenization_albert'''] = ['''AlbertTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''tokenization_albert_fast'''] = ['''AlbertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_albert'''] = [
'''ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AlbertForMaskedLM''',
'''AlbertForMultipleChoice''',
'''AlbertForPreTraining''',
'''AlbertForQuestionAnswering''',
'''AlbertForSequenceClassification''',
'''AlbertForTokenClassification''',
'''AlbertModel''',
'''AlbertPreTrainedModel''',
'''load_tf_weights_in_albert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_tf_albert'''] = [
'''TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAlbertForMaskedLM''',
'''TFAlbertForMultipleChoice''',
'''TFAlbertForPreTraining''',
'''TFAlbertForQuestionAnswering''',
'''TFAlbertForSequenceClassification''',
'''TFAlbertForTokenClassification''',
'''TFAlbertMainLayer''',
'''TFAlbertModel''',
'''TFAlbertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_flax_albert'''] = [
'''FlaxAlbertForMaskedLM''',
'''FlaxAlbertForMultipleChoice''',
'''FlaxAlbertForPreTraining''',
'''FlaxAlbertForQuestionAnswering''',
'''FlaxAlbertForSequenceClassification''',
'''FlaxAlbertForTokenClassification''',
'''FlaxAlbertModel''',
'''FlaxAlbertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
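For readers unfamiliar with the `_LazyModule` pattern above, a minimal usage sketch follows; it assumes this file is installed as `transformers.models.albert` and is illustrative only:

```python
# Illustrative: with _LazyModule, submodules load on first attribute access,
# not at package import time.
from transformers.models.albert import AlbertConfig  # triggers the lazy import of configuration_albert

print(AlbertConfig.model_type)  # "albert"
```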
| code_codestyle: 104 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''bert_for_seq_generation''': (
            '''https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'''
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''bert_for_seq_generation''': 512}


class BertGenerationTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
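A short usage sketch for the tokenizer above, assuming a local SentencePiece file; the path is a placeholder:

```python
# Hypothetical usage; "spiece.model" is a placeholder for a real SentencePiece file.
tokenizer = BertGenerationTokenizer(vocab_file="spiece.model")
ids = tokenizer("Hello world")["input_ids"]
tokens = tokenizer.convert_ids_to_tokens(ids)
text = tokenizer.convert_tokens_to_string(tokens)
```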
| style_context_codestyle: 104 | label: 1 |
'''simple docstring'''
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data: rescale every value into [0, 1]
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data: zero mean, unit (sample) standard deviation
    return [round((x - mu) / sigma, ndigits) for x in data]
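A quick worked example of both helpers (outputs computed by hand from the formulas above):

```python
# min=2, max=50, range=48, so e.g. (7 - 2) / 48 ≈ 0.104
print(normalization([2, 7, 10, 20, 30, 50]))  # [0.0, 0.104, 0.167, 0.375, 0.583, 1.0]
# mean=0, sample stdev ≈ 2.582, so e.g. -3 / 2.582 ≈ -1.162
print(standardization([-3, -1, 1, 3]))  # [-1.162, -0.387, 0.387, 1.162]
```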
| code_codestyle: 364 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={'help': 'The output directory where the model will be written.'},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            'help': (
                'The encoder model checkpoint for weights initialization. '
                'Don\'t set if you want to train an encoder model from scratch.'
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            'help': (
                'The decoder model checkpoint for weights initialization. '
                'Don\'t set if you want to train a decoder model from scratch.'
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained encoder config name or path if not the same as encoder_model_name'}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained decoder config name or path if not the same as decoder_model_name'}
    )


def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)

    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
if __name__ == "__main__":
main()
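A hypothetical invocation of the script; the encoder/decoder checkpoints below are examples only, not mandated by the script:

```python
import sys

# Simulate the CLI; any image encoder + text decoder pair should work.
sys.argv = [
    "create_model.py",
    "--output_dir", "vit-gpt2",
    "--encoder_model_name_or_path", "google/vit-base-patch16-224-in21k",
    "--decoder_model_name_or_path", "gpt2",
]
main()
```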
| style_context_codestyle: 107 | label: 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = 'ctrl'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(
        self,
        vocab_size=24_6534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
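A brief sketch of what `attribute_map` buys you: generic config attribute names resolve to the CTRL-specific fields.

```python
# attribute_map aliases generic names onto CTRL's own fields.
config = CTRLConfig(n_layer=2)
print(config.num_hidden_layers)  # 2  (alias of n_layer)
print(config.hidden_size)        # 1280  (alias of n_embd)
```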
| code_codestyle: 218 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = 'ctrl'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(
        self,
        vocab_size=24_6534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
| style_context_codestyle: 218 | label: 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Union[str, Any]=13 , _lowerCAmelCase : int=7 , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Optional[int]=99 , _lowerCAmelCase : Optional[Any]=32 , _lowerCAmelCase : List[str]=2 , _lowerCAmelCase : Union[str, Any]=4 , _lowerCAmelCase : List[Any]=37 , _lowerCAmelCase : List[str]="gelu" , _lowerCAmelCase : int=0.1 , _lowerCAmelCase : Tuple=0.1 , _lowerCAmelCase : Dict=5_12 , _lowerCAmelCase : str=16 , _lowerCAmelCase : Dict=2 , _lowerCAmelCase : List[Any]=0.02 , _lowerCAmelCase : Dict=False , _lowerCAmelCase : int=True , _lowerCAmelCase : Union[str, Any]="None" , _lowerCAmelCase : int=3 , _lowerCAmelCase : Any=4 , _lowerCAmelCase : List[Any]=None , ):
__snake_case : List[str] = parent
__snake_case : int = batch_size
__snake_case : Optional[int] = seq_length
__snake_case : List[str] = is_training
__snake_case : Optional[int] = use_input_mask
__snake_case : Optional[Any] = use_token_type_ids
__snake_case : List[str] = use_labels
__snake_case : List[Any] = vocab_size
__snake_case : List[Any] = hidden_size
__snake_case : int = num_hidden_layers
__snake_case : str = num_attention_heads
__snake_case : Tuple = intermediate_size
__snake_case : Tuple = hidden_act
__snake_case : List[str] = hidden_dropout_prob
__snake_case : Optional[int] = attention_probs_dropout_prob
__snake_case : List[Any] = max_position_embeddings
__snake_case : Optional[Any] = type_vocab_size
__snake_case : Optional[Any] = type_sequence_label_size
__snake_case : Optional[Any] = initializer_range
__snake_case : int = num_labels
__snake_case : str = num_choices
__snake_case : Optional[int] = relative_attention
__snake_case : Optional[Any] = position_biased_input
__snake_case : Dict = pos_att_type
__snake_case : Tuple = scope
def snake_case__ ( self : Optional[Any] ):
__snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : Optional[int] = None
if self.use_input_mask:
__snake_case : int = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case : Union[str, Any] = None
if self.use_token_type_ids:
__snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case : str = None
__snake_case : int = None
__snake_case : List[str] = None
if self.use_labels:
__snake_case : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case : Dict = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=lowerCAmelCase__ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self : Optional[int] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Dict ):
__snake_case : List[str] = TFDebertaVaModel(config=lowerCAmelCase__ )
__snake_case : Dict = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
__snake_case : Any = [input_ids, input_mask]
__snake_case : str = model(lowerCAmelCase__ )
__snake_case : Tuple = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : str ):
__snake_case : Union[str, Any] = TFDebertaVaForMaskedLM(config=lowerCAmelCase__ )
__snake_case : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
__snake_case : Dict = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self : Optional[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[int] ):
__snake_case : int = self.num_labels
__snake_case : Optional[int] = TFDebertaVaForSequenceClassification(config=lowerCAmelCase__ )
__snake_case : Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
__snake_case : List[str] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int ):
__snake_case : Optional[int] = self.num_labels
__snake_case : int = TFDebertaVaForTokenClassification(config=lowerCAmelCase__ )
__snake_case : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
__snake_case : List[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self : List[str] , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Tuple ):
__snake_case : Union[str, Any] = TFDebertaVaForQuestionAnswering(config=lowerCAmelCase__ )
__snake_case : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
__snake_case : str = model(lowerCAmelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case__ ( self : List[str] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( a__ , a__ , unittest.TestCase ):
A : Union[str, Any] = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
A : List[str] = (
{
"""feature-extraction""": TFDebertaVaModel,
"""fill-mask""": TFDebertaVaForMaskedLM,
"""question-answering""": TFDebertaVaForQuestionAnswering,
"""text-classification""": TFDebertaVaForSequenceClassification,
"""token-classification""": TFDebertaVaForTokenClassification,
"""zero-shot""": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
A : Union[str, Any] = False
A : Dict = False
def snake_case__ ( self : List[Any] ):
__snake_case : str = TFDebertaVaModelTester(self )
__snake_case : Dict = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=37 )
def snake_case__ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def snake_case__ ( self : List[str] ):
__snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def snake_case__ ( self : Tuple ):
__snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase__ )
def snake_case__ ( self : List[str] ):
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase__ )
def snake_case__ ( self : List[Any] ):
__snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase__ )
def snake_case__ ( self : Any ):
__snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase__ )
@slow
def snake_case__ ( self : List[Any] ):
__snake_case : str = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
self.assertIsNotNone(lowerCAmelCase__ )
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@unittest.skip(reason="""Model not available yet""" )
def snake_case__ ( self : List[Any] ):
pass
@slow
def snake_case__ ( self : Optional[Any] ):
__snake_case : int = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
__snake_case : Union[str, Any] = tf.constant([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
__snake_case : str = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__snake_case : str = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )[0]
__snake_case : Any = tf.constant(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , lowerCAmelCase__ , atol=1e-4 )
| code_codestyle: 362 |
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , ):
'''simple docstring'''
__snake_case : Optional[int] = {
"""7z""": (seven_zip_file, SevenZipExtractor),
"""bz2""": (bza_file, BzipaExtractor),
"""gzip""": (gz_file, GzipExtractor),
"""lz4""": (lza_file, LzaExtractor),
"""tar""": (tar_file, TarExtractor),
"""xz""": (xz_file, XzExtractor),
"""zip""": (zip_file, ZipExtractor),
"""zstd""": (zstd_file, ZstdExtractor),
}
__snake_case , __snake_case : Tuple = input_paths_and_base_extractors[compression_format]
if input_path is None:
__snake_case : Tuple = F'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__SCREAMING_SNAKE_CASE )
assert base_extractor.is_extractable(__SCREAMING_SNAKE_CASE )
__snake_case : List[str] = tmp_path / ("""extracted""" if is_archive else """extracted.txt""")
base_extractor.extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
__snake_case : List[str] = file_path.read_text(encoding="""utf-8""" )
else:
__snake_case : Optional[Any] = output_path.read_text(encoding="""utf-8""" )
__snake_case : int = text_file.read_text(encoding="""utf-8""" )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , ):
'''simple docstring'''
__snake_case : Union[str, Any] = {
"""7z""": seven_zip_file,
"""bz2""": bza_file,
"""gzip""": gz_file,
"""lz4""": lza_file,
"""tar""": tar_file,
"""xz""": xz_file,
"""zip""": zip_file,
"""zstd""": zstd_file,
}
__snake_case : int = input_paths[compression_format]
if input_path is None:
__snake_case : int = F'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__SCREAMING_SNAKE_CASE )
__snake_case : Any = Extractor.infer_extractor_format(__SCREAMING_SNAKE_CASE )
assert extractor_format is not None
__snake_case : Tuple = tmp_path / ("""extracted""" if is_archive else """extracted.txt""")
Extractor.extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
__snake_case : Union[str, Any] = file_path.read_text(encoding="""utf-8""" )
else:
__snake_case : Union[str, Any] = output_path.read_text(encoding="""utf-8""" )
__snake_case : Optional[Any] = text_file.read_text(encoding="""utf-8""" )
assert extracted_file_content == expected_file_content
@pytest.fixture
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
import tarfile
__snake_case : List[str] = tmp_path / """data_dot_dot"""
directory.mkdir()
__snake_case : Optional[Any] = directory / """tar_file_with_dot_dot.tar"""
with tarfile.TarFile(__SCREAMING_SNAKE_CASE , """w""" ) as f:
f.add(__SCREAMING_SNAKE_CASE , arcname=os.path.join("""..""" , text_file.name ) )
return path
@pytest.fixture
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
import tarfile
__snake_case : Dict = tmp_path / """data_sym_link"""
directory.mkdir()
__snake_case : Tuple = directory / """tar_file_with_sym_link.tar"""
os.symlink("""..""" , directory / """subdir""" , target_is_directory=__SCREAMING_SNAKE_CASE )
with tarfile.TarFile(__SCREAMING_SNAKE_CASE , """w""" ) as f:
f.add(str(directory / """subdir""" ) , arcname="""subdir""" ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
"""insecure_tar_file, error_log""" , [("""tar_file_with_dot_dot""", """illegal path"""), ("""tar_file_with_sym_link""", """Symlink""")] , )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
__snake_case : Any = {
"""tar_file_with_dot_dot""": tar_file_with_dot_dot,
"""tar_file_with_sym_link""": tar_file_with_sym_link,
}
__snake_case : int = insecure_tar_files[insecure_tar_file]
__snake_case : Optional[int] = tmp_path / """extracted"""
TarExtractor.extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
# We should have less false positives than zipfile.is_zipfile
# We do that by checking only the magic number
__snake_case : Optional[Any] = tmpdir / """not_a_zip_file"""
# From: https://github.com/python/cpython/pull/5053
__snake_case : List[str] = (
b"""\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"""
b"""\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"""
b"""DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"""
b"""\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"""
)
with not_a_zip_file.open("""wb""" ) as f:
f.write(__SCREAMING_SNAKE_CASE )
assert zipfile.is_zipfile(str(__SCREAMING_SNAKE_CASE ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(__SCREAMING_SNAKE_CASE ) # but we're right
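The magic-number idea behind the last test can be sketched as follows; this is an illustration of the concept, not the library's exact implementation:

```python
# Minimal sketch: a ZIP local-file header starts with the 4-byte magic "PK\x03\x04",
# so checking those bytes avoids zipfile.is_zipfile's false positives.
ZIP_MAGIC_NUMBER = b"PK\x03\x04"

def looks_like_zip(path) -> bool:
    with open(path, "rb") as f:
        return f.read(len(ZIP_MAGIC_NUMBER)) == ZIP_MAGIC_NUMBER
```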
| style_context_codestyle: 20 | label: 0 |
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    if not sentence:
        return ""
    # map each lower-case letter to its upper-case counterpart
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
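Example behavior (only the first character is upper-cased; everything else passes through):

```python
print(capitalize("hello world"))  # "Hello world"
print(capitalize("123 hello"))    # "123 hello" (non-letters are left unchanged)
```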
| code_codestyle: 99 |
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
SCREAMING_SNAKE_CASE__ = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : int ) -> Union[str, Any]:
for attribute in key.split('.' ):
__lowercase = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if weight_type is not None:
__lowercase = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape
else:
__lowercase = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
__lowercase = value
elif weight_type == "weight_g":
__lowercase = value
elif weight_type == "weight_v":
__lowercase = value
elif weight_type == "bias":
__lowercase = value
else:
__lowercase = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple:
__lowercase = []
__lowercase = fairseq_model.state_dict()
__lowercase = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
__lowercase = None
for name, value in fairseq_dict.items():
__lowercase = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , )
__lowercase = True
elif name.split('.' )[0] == "proj":
__lowercase = fairseq_model.proj
__lowercase = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__lowercase = True
if "*" in mapped_key:
__lowercase = name.split(SCREAMING_SNAKE_CASE )[0].split('.' )[-2]
__lowercase = mapped_key.replace('*' , SCREAMING_SNAKE_CASE )
if "weight_g" in name:
__lowercase = 'weight_g'
elif "weight_v" in name:
__lowercase = 'weight_v'
elif "bias" in name:
__lowercase = 'bias'
elif "weight" in name:
__lowercase = 'weight'
else:
__lowercase = None
set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE )
logger.warning(F"""Unused weights: {unused_weights}""" )
return proj_weight
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[Any]:
__lowercase = full_name.split('conv_layers.' )[-1]
__lowercase = name.split('.' )
__lowercase = int(items[0] )
__lowercase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
__lowercase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
__lowercase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
__lowercase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
__lowercase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Tuple ) -> List[str]:
__lowercase , __lowercase = emb.weight.shape
__lowercase = nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE )
__lowercase = emb.weight.data
return lin_layer
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[Any]:
with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' ) as f:
__lowercase = f.readlines()
__lowercase = [line.split(' ' )[0] for line in lines]
__lowercase = len(SCREAMING_SNAKE_CASE )
__lowercase = {
'<s>': 0,
'<pad>': 1,
'</s>': 2,
'<unk>': 3,
}
vocab_dict.update(dict(zip(SCREAMING_SNAKE_CASE , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] , ) -> List[Any]:
__lowercase = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE )
__lowercase = SpeechaTextaConfig.from_pretrained(
SCREAMING_SNAKE_CASE , vocab_size=SCREAMING_SNAKE_CASE , decoder_layers=SCREAMING_SNAKE_CASE , do_stable_layer_norm=SCREAMING_SNAKE_CASE )
__lowercase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , )
__lowercase , __lowercase , __lowercase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
__lowercase = model[0].eval()
# set weights for wav2vec2 encoder
__lowercase = WavaVecaModel(SCREAMING_SNAKE_CASE )
__lowercase = recursively_load_weights_wavaveca(model.encoder , SCREAMING_SNAKE_CASE )
__lowercase = SpeechaTextaForCausalLM(SCREAMING_SNAKE_CASE )
__lowercase , __lowercase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=SCREAMING_SNAKE_CASE )
# set output linear layer
unexpected_keys.remove('embed_out' )
__lowercase = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
__lowercase = SpeechEncoderDecoderModel(encoder=SCREAMING_SNAKE_CASE , decoder=SCREAMING_SNAKE_CASE )
__lowercase = False
# add projection layer
__lowercase = nn.Parameter(projection_layer.weight )
__lowercase = nn.Parameter(projection_layer.bias )
__lowercase = create_vocab_dict(SCREAMING_SNAKE_CASE )
with open(os.path.join(SCREAMING_SNAKE_CASE , 'vocab.json' ) , 'w' ) as fp:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase = SpeechaTextaTokenizer(os.path.join(SCREAMING_SNAKE_CASE , 'vocab.json' ) )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
__lowercase = hf_wavavec.config.to_dict()
__lowercase = tokenizer.pad_token_id
__lowercase = tokenizer.bos_token_id
__lowercase = tokenizer.eos_token_id
__lowercase = 'speech_to_text_2'
__lowercase = 'wav2vec2'
__lowercase = SpeechEncoderDecoderConfig.from_dict(SCREAMING_SNAKE_CASE )
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-large-lv60""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/s2t-small-mustc-en-fr-st""",
type=str,
help="""Path to hf decoder s2t checkpoint config""",
)
parser.add_argument("""--vocab_size""", default=1_0224, type=int, help="""Vocab size of decoder""")
parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""")
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
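A hypothetical direct call mirroring the argparse wiring above; all file paths are placeholders, and the config names repeat the parser defaults:

```python
# Placeholder paths; keyword arguments mirror the parser defaults defined above.
convert_wavaveca_checkpoint(
    "wav2vec2_seq2seq.pt",   # checkpoint_path
    "converted_model",        # pytorch_dump_folder_path
    "dict.ltr.txt",           # dict_path
    encoder_config_path="facebook/wav2vec2-large-lv60",
    decoder_config_path="facebook/s2t-small-mustc-en-fr-st",
    vocab_size=10224,
    num_decoder_layers=7,
)
```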
| style_context_codestyle: 325 | label: 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
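A minimal usage sketch for the pipeline exported above; the checkpoint name is an assumption (the UnCLIP weights commonly used with this pipeline), not taken from this file:

```python
from diffusers import UnCLIPPipeline

# "kakaobrain/karlo-v1-alpha" is assumed here as the usual UnCLIP checkpoint.
pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha")
image = pipe("a photo of an astronaut riding a horse").images[0]
```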
| code_codestyle: 82 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class _a :
def __init__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=10, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=32 * 8, SCREAMING_SNAKE_CASE_=32 * 8, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=64, ) -> Union[str, Any]:
UpperCAmelCase_: int = parent
UpperCAmelCase_: Tuple = batch_size
UpperCAmelCase_: int = is_training
UpperCAmelCase_: Any = use_auxiliary_loss
UpperCAmelCase_: str = num_queries
UpperCAmelCase_: List[Any] = num_channels
UpperCAmelCase_: Union[str, Any] = min_size
UpperCAmelCase_: Optional[Any] = max_size
UpperCAmelCase_: Tuple = num_labels
UpperCAmelCase_: Union[str, Any] = hidden_dim
UpperCAmelCase_: int = hidden_dim
def __snake_case (self ) -> Tuple:
UpperCAmelCase_: List[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[str] = torch.ones([self.batch_size, self.min_size, self.max_size], device=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=SCREAMING_SNAKE_CASE_ ) > 0.5
).float()
UpperCAmelCase_: Optional[int] = (torch.rand((self.batch_size, self.num_labels), device=SCREAMING_SNAKE_CASE_ ) > 0.5).long()
UpperCAmelCase_: Union[str, Any] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __snake_case (self ) -> Any:
UpperCAmelCase_: Any = MaskaFormerConfig(
hidden_size=self.hidden_dim, )
UpperCAmelCase_: Any = self.num_queries
UpperCAmelCase_: Dict = self.num_labels
UpperCAmelCase_: Dict = [1, 1, 1, 1]
UpperCAmelCase_: int = self.num_channels
UpperCAmelCase_: Union[str, Any] = 64
UpperCAmelCase_: List[Any] = 128
UpperCAmelCase_: Optional[Any] = self.hidden_dim
UpperCAmelCase_: str = self.hidden_dim
UpperCAmelCase_: List[str] = self.hidden_dim
return config
def __snake_case (self ) -> Union[str, Any]:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: Dict = self.prepare_config_and_inputs()
UpperCAmelCase_: Any = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
UpperCAmelCase_: Union[str, Any] = output.encoder_hidden_states
UpperCAmelCase_: int = output.pixel_decoder_hidden_states
UpperCAmelCase_: Any = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ), len(config.backbone_config.depths ) )
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ), len(config.backbone_config.depths ) )
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ), config.decoder_layers )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=False ) -> Optional[Any]:
with torch.no_grad():
UpperCAmelCase_: Dict = MaskaFormerModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCAmelCase_: List[Any] = model(pixel_values=SCREAMING_SNAKE_CASE_, pixel_mask=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: str = model(SCREAMING_SNAKE_CASE_, output_hidden_states=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_dim), )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCAmelCase_: Tuple = MaskaFormerForUniversalSegmentation(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
def comm_check_on_output(SCREAMING_SNAKE_CASE_ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
UpperCAmelCase_: Dict = model(pixel_values=SCREAMING_SNAKE_CASE_, pixel_mask=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: int = model(SCREAMING_SNAKE_CASE_ )
comm_check_on_output(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Dict = model(
pixel_values=SCREAMING_SNAKE_CASE_, pixel_mask=SCREAMING_SNAKE_CASE_, mask_labels=SCREAMING_SNAKE_CASE_, class_labels=SCREAMING_SNAKE_CASE_ )
comm_check_on_output(SCREAMING_SNAKE_CASE_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape, torch.Size([1] ) )
@require_torch
class _a ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
A = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
A = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
A = False
A = False
A = False
A = False
def __snake_case (self ) -> Any:
UpperCAmelCase_: List[str] = MaskaFormerModelTester(self )
UpperCAmelCase_: Any = ConfigTester(self, config_class=SCREAMING_SNAKE_CASE_, has_text_modality=SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> List[Any]:
self.config_tester.run_common_tests()
def __snake_case (self ) -> Optional[Any]:
UpperCAmelCase_ , UpperCAmelCase_: Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, output_hidden_states=SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Union[str, Any]:
UpperCAmelCase_: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*SCREAMING_SNAKE_CASE_ )
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def __snake_case (self ) -> Dict:
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def __snake_case (self ) -> Optional[int]:
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def __snake_case (self ) -> List[str]:
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def __snake_case (self ) -> Union[str, Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def __snake_case (self ) -> List[str]:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __snake_case (self ) -> Dict:
pass
def __snake_case (self ) -> Any:
UpperCAmelCase_ , UpperCAmelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_: Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_: Tuple = [*signature.parameters.keys()]
UpperCAmelCase_: str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], SCREAMING_SNAKE_CASE_ )
@slow
def __snake_case (self ) -> List[Any]:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
UpperCAmelCase_: Any = MaskaFormerModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Union[str, Any]:
UpperCAmelCase_: str = (self.model_tester.min_size,) * 2
UpperCAmelCase_: str = {
"""pixel_values""": torch.randn((2, 3, *size), device=SCREAMING_SNAKE_CASE_ ),
"""mask_labels""": torch.randn((2, 10, *size), device=SCREAMING_SNAKE_CASE_ ),
"""class_labels""": torch.zeros(2, 10, device=SCREAMING_SNAKE_CASE_ ).long(),
}
UpperCAmelCase_: Dict = self.model_tester.get_config()
UpperCAmelCase_: Optional[Any] = MaskaFormerForUniversalSegmentation(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[Any] = model(**SCREAMING_SNAKE_CASE_ )
self.assertTrue(outputs.loss is not None )
def __snake_case (self ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_: Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, output_hidden_states=SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_: List[Any] = model_class(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Any = model(**SCREAMING_SNAKE_CASE_, output_attentions=SCREAMING_SNAKE_CASE_ )
self.assertTrue(outputs.attentions is not None )
def __snake_case (self ) -> Optional[int]:
if not self.model_tester.is_training:
return
UpperCAmelCase_: Union[str, Any] = self.all_model_classes[1]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: Any = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase_: Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
UpperCAmelCase_: Optional[int] = model(SCREAMING_SNAKE_CASE_, mask_labels=SCREAMING_SNAKE_CASE_, class_labels=SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def __snake_case (self ) -> Optional[Any]:
UpperCAmelCase_: Any = self.all_model_classes[1]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: List[Any] = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase_: Union[str, Any] = True
UpperCAmelCase_: str = True
UpperCAmelCase_: Optional[int] = model_class(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
model.train()
UpperCAmelCase_: Union[str, Any] = model(SCREAMING_SNAKE_CASE_, mask_labels=SCREAMING_SNAKE_CASE_, class_labels=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCAmelCase_: Union[str, Any] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
UpperCAmelCase_: Optional[int] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCAmelCase_: Optional[Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
a : int = 1E-4
def lowerCAmelCase_ ():
"""simple docstring"""
UpperCAmelCase_: str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class _a ( unittest.TestCase ):
@cached_property
def __snake_case (self ) -> Optional[int]:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def __snake_case (self ) -> Dict:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def __snake_case (self ) -> List[str]:
UpperCAmelCase_: int = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[int] = self.default_image_processor
UpperCAmelCase_: Optional[Any] = prepare_img()
UpperCAmelCase_: str = image_processor(SCREAMING_SNAKE_CASE_, return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(SCREAMING_SNAKE_CASE_, (1, 3, 384, 384) )
with torch.no_grad():
UpperCAmelCase_: Optional[int] = model(**SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Union[str, Any] = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3], SCREAMING_SNAKE_CASE_, atol=SCREAMING_SNAKE_CASE_ ) )
UpperCAmelCase_: Dict = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], SCREAMING_SNAKE_CASE_, atol=SCREAMING_SNAKE_CASE_ ) )
UpperCAmelCase_: str = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3], SCREAMING_SNAKE_CASE_, atol=SCREAMING_SNAKE_CASE_ ) )
def __snake_case (self ) -> Optional[Any]:
UpperCAmelCase_: Any = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(SCREAMING_SNAKE_CASE_ ).eval()
UpperCAmelCase_: Tuple = self.default_image_processor
UpperCAmelCase_: Dict = prepare_img()
UpperCAmelCase_: Any = image_processor(SCREAMING_SNAKE_CASE_, return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(SCREAMING_SNAKE_CASE_, (1, 3, 384, 384) )
with torch.no_grad():
UpperCAmelCase_: int = model(**SCREAMING_SNAKE_CASE_ )
# masks_queries_logits
UpperCAmelCase_: int = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
UpperCAmelCase_: Optional[Any] = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
UpperCAmelCase_: int = torch.tensor(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], SCREAMING_SNAKE_CASE_, atol=SCREAMING_SNAKE_CASE_ ) )
# class_queries_logits
UpperCAmelCase_: Dict = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1) )
UpperCAmelCase_: Any = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], SCREAMING_SNAKE_CASE_, atol=SCREAMING_SNAKE_CASE_ ) )
    def test_with_segmentation_maps_and_loss (self ) -> Tuple:
UpperCAmelCase_: List[str] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(SCREAMING_SNAKE_CASE_ ).eval()
UpperCAmelCase_: Dict = self.default_image_processor
UpperCAmelCase_: str = image_processor(
            [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )], segmentation_maps=[np.zeros((384, 384) ).astype(np.float32 ), np.zeros((384, 384) ).astype(np.float32 )], return_tensors="""pt""", )
UpperCAmelCase_: int = inputs["""pixel_values"""].to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[int] = [el.to(SCREAMING_SNAKE_CASE_ ) for el in inputs["""mask_labels"""]]
UpperCAmelCase_: int = [el.to(SCREAMING_SNAKE_CASE_ ) for el in inputs["""class_labels"""]]
with torch.no_grad():
UpperCAmelCase_: Union[str, Any] = model(**SCREAMING_SNAKE_CASE_ )
self.assertTrue(outputs.loss is not None )
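    # A hedged follow-up sketch: turning the raw outputs above into a segmentation
    # map normally goes through the image processor's post-processing helpers, e.g.
    #   prediction = image_processor.post_process_instance_segmentation(outputs)[0]
    # (the exact helper depends on the task; this one matches the
    # -coco-instance checkpoint used in these tests).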
| 82 | 1 |
'''simple docstring'''


def prefix_function (input_string ) -> list:
    """
    Knuth-Morris-Pratt prefix function: prefix_result[i] is the length of the
    longest proper prefix of input_string[: i + 1] that is also its suffix.
    """
    prefix_result = [0] * len(input_string )
    for i in range(1 , len(input_string ) ):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix (input_string ) -> int:
    """Return the maximum value of the prefix function over the whole string."""
    return max(prefix_function(input_string ) )
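# A small sanity check; "aabaaab" is just an illustrative string and the
# expected values follow directly from the definition above.
assert prefix_function("aabaaab" ) == [0, 1, 0, 1, 2, 2, 3]
assert longest_prefix("aabaaab" ) == 3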
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2 |
'''simple docstring'''
import argparse

import torch
from torch import nn

from transformers import MBartConfig, MBartForConditionalGeneration


def remove_ignore_keys_(state_dict ) -> None:
    """Drop fairseq-only keys that have no equivalent in the HF model."""
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''_float_tensor''',
        '''decoder.output_projection.weight''',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )


def make_linear_from_emb(emb ) -> nn.Linear:
    """Build an lm_head linear layer tied to the given embedding weights."""
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path , hf_config_path="facebook/mbart-large-en-ro" , finetuned=False , mbart_50=False
) -> MBartForConditionalGeneration:
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path , vocab_size=vocab_size )
    if mbart_50 and finetuned:
        mbart_config.activation_function = '''relu'''

    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = MBartForConditionalGeneration(mbart_config )
    model.model.load_state_dict(state_dict )

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared )

    return model
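# Example invocation (the file names are illustrative; the flags match the
# parser below):
#   python convert_mbart_checkpoint.py model.pt ./mbart-converted \
#       --hf_config facebook/mbart-large-cc25 --finetuned --mbart_50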
if __name__ == "__main__":
lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config',
default='facebook/mbart-large-cc25',
type=str,
help='Which huggingface architecture to use: mbart-large',
)
parser.add_argument('--mbart_50', action='store_true', help='whether the model is mMART-50 checkpoint')
parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
lowerCamelCase : Any = parser.parse_args()
lowerCamelCase : List[str] = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 2 | 1 |
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
UpperCamelCase__ = logging.getLogger(__name__)
UpperCamelCase__ = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
UpperCamelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class a__ :
_a : Optional[str] = field(
default=snake_case__ , metadata={
"""help""": (
"""The model checkpoint for weights initialization. Leave None if you want to train a model from"""
""" scratch."""
)
} , )
_a : Optional[str] = field(
default=snake_case__ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(snake_case__ )} , )
_a : Optional[str] = field(
default=snake_case__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_a : Optional[str] = field(
default=snake_case__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
_a : Optional[str] = field(
default=snake_case__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class a__ :
_a : Optional[str] = field(
default=snake_case__ , metadata={"""help""": """The input training data file (a text file)."""} )
_a : Optional[str] = field(
default=snake_case__ , metadata={
"""help""": (
"""The input training data files (multiple files in glob format). """
"""Very often splitting large files to smaller files can prevent tokenizer going out of memory"""
)
} , )
_a : Optional[str] = field(
default=snake_case__ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
_a : Optional[str] = field(
default=snake_case__ , metadata={"""help""": """An optional input train ref data file for whole word mask in Chinese."""} , )
_a : Optional[str] = field(
default=snake_case__ , metadata={"""help""": """An optional input eval ref data file for whole word mask in Chinese."""} , )
_a : bool = field(
default=snake_case__ , metadata={"""help""": """Whether distinct lines of text in the dataset are to be handled as distinct sequences."""} , )
_a : bool = field(
default=snake_case__ , metadata={"""help""": """Train with masked-language modeling loss instead of language modeling."""} )
_a : bool = field(default=snake_case__ , metadata={"""help""": """Whether ot not to use whole word mask."""} )
_a : float = field(
default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
_a : float = field(
default=1 / 6 , metadata={
"""help""": (
"""Ratio of length of a span of masked tokens to surrounding context length for permutation language"""
""" modeling."""
)
} , )
_a : int = field(
default=5 , metadata={"""help""": """Maximum length of a span of masked tokens for permutation language modeling."""} )
_a : int = field(
default=-1 , metadata={
"""help""": (
"""Optional input sequence length after tokenization."""
"""The training dataset will be truncated in block of this size for training."""
"""Default to the model max input length for single sentence inputs (take into account special tokens)."""
)
} , )
_a : bool = field(
default=snake_case__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def UpperCAmelCase_ ( SCREAMING_SNAKE_CASE_ : DataTrainingArguments , SCREAMING_SNAKE_CASE_ : PreTrainedTokenizer , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : Optional[str] = None , ):
def _dataset(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str]=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask" )
return LineByLineWithRefDataset(
tokenizer=SCREAMING_SNAKE_CASE_ , file_path=SCREAMING_SNAKE_CASE_ , block_size=args.block_size , ref_path=SCREAMING_SNAKE_CASE_ , )
return LineByLineTextDataset(tokenizer=SCREAMING_SNAKE_CASE_ , file_path=SCREAMING_SNAKE_CASE_ , block_size=args.block_size )
else:
return TextDataset(
tokenizer=SCREAMING_SNAKE_CASE_ , file_path=SCREAMING_SNAKE_CASE_ , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=SCREAMING_SNAKE_CASE_ , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(SCREAMING_SNAKE_CASE_ ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def UpperCAmelCase_ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
"or remove the --do_eval argument." )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , SCREAMING_SNAKE_CASE_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
__lowerCAmelCase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowerCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
__lowerCAmelCase = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.tokenizer_name:
__lowerCAmelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowerCAmelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
" script, save it,and load it from here, using --tokenizer_name" )
if model_args.model_name_or_path:
__lowerCAmelCase = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , )
else:
logger.info("Training new model from scratch" )
__lowerCAmelCase = AutoModelWithLMHead.from_config(SCREAMING_SNAKE_CASE_ )
model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE_ ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
"--mlm flag (masked language modeling)." )
if data_args.block_size <= 0:
__lowerCAmelCase = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
__lowerCAmelCase = min(data_args.block_size , tokenizer.max_len )
# Get datasets
__lowerCAmelCase = (
get_dataset(SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
__lowerCAmelCase = (
get_dataset(SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , evaluate=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
__lowerCAmelCase = DataCollatorForPermutationLanguageModeling(
tokenizer=SCREAMING_SNAKE_CASE_ , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
__lowerCAmelCase = DataCollatorForWholeWordMask(
tokenizer=SCREAMING_SNAKE_CASE_ , mlm_probability=data_args.mlm_probability )
else:
__lowerCAmelCase = DataCollatorForLanguageModeling(
tokenizer=SCREAMING_SNAKE_CASE_ , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__lowerCAmelCase = Trainer(
model=SCREAMING_SNAKE_CASE_ , args=SCREAMING_SNAKE_CASE_ , data_collator=SCREAMING_SNAKE_CASE_ , train_dataset=SCREAMING_SNAKE_CASE_ , eval_dataset=SCREAMING_SNAKE_CASE_ , prediction_loss_only=SCREAMING_SNAKE_CASE_ , )
# Training
if training_args.do_train:
__lowerCAmelCase = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=SCREAMING_SNAKE_CASE_ )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowerCAmelCase = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
__lowerCAmelCase = trainer.evaluate()
__lowerCAmelCase = math.exp(eval_output["eval_loss"] )
__lowerCAmelCase = {"perplexity": perplexity}
__lowerCAmelCase = os.path.join(training_args.output_dir , "eval_results_lm.txt" )
if trainer.is_world_master():
with open(SCREAMING_SNAKE_CASE_ , "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" , SCREAMING_SNAKE_CASE_ , str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
results.update(SCREAMING_SNAKE_CASE_ )
return results
def UpperCAmelCase_ ( SCREAMING_SNAKE_CASE_ : List[Any] ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
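# Example invocation (the file and directory names are illustrative; the flags
# come from the argument dataclasses above):
#   python run_language_modeling.py --model_name_or_path gpt2 \
#       --train_data_file train.txt --do_train --output_dir ./lm-finetuned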
| 367 |
import argparse
import os

import torch

from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint


def count_parameters(state_dict ):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items() )


def upgrade_state_dict(state_dict , codebook_state_dict ):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions" , "mmm_image_head" )
        key = key.replace("heads.cmd.mlm_head.cls.predictions" , "mmm_text_head" )
        key = key.replace("heads.cmd.itm_head.cls" , "itm_head" )
        key = key.replace("heads.cmd.itm_head.pooler" , "itm_head.pooler" )
        key = key.replace("heads.cmd.clip_head.logit_scale" , "flava.logit_scale" )
        key = key.replace("heads.fairseq_mlm.cls.predictions" , "mlm_head" )
        key = key.replace("heads.imagenet.mim_head.cls.predictions" , "mim_head" )
        key = key.replace("mm_text_projection" , "flava.text_to_mm_projection" )
        key = key.replace("mm_image_projection" , "flava.image_to_mm_projection" )
        key = key.replace("image_encoder.module" , "flava.image_model" )
        key = key.replace("text_encoder.module" , "flava.text_model" )
        key = key.replace("mm_encoder.module.encoder.cls_token" , "flava.multimodal_model.cls_token" )
        key = key.replace("mm_encoder.module" , "flava.multimodal_model" )
        key = key.replace("text_projection" , "flava.text_projection" )
        key = key.replace("image_projection" , "flava.image_projection" )

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path , codebook_path , pytorch_dump_folder_path , config_path=None ):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path )
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config ).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path , None , save_checkpoint=False )

    if os.path.exists(checkpoint_path ):
        state_dict = torch.load(checkpoint_path , map_location="cpu" )
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path , map_location="cpu" )

    hf_state_dict = upgrade_state_dict(state_dict , codebook_state_dict )
    hf_model.load_state_dict(hf_state_dict )

    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict )
    state_dict_count = count_parameters(state_dict ) + count_parameters(codebook_state_dict )

    assert torch.allclose(hf_count , state_dict_count , atol=1e-3 )

    hf_model.save_pretrained(pytorch_dump_folder_path )
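# Example invocation (paths are illustrative):
#   python convert_flava_original_checkpoint.py --checkpoint_path flava.pt \
#       --codebook_path codebook.pt --pytorch_dump_folder_path ./flava-converted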
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
UpperCamelCase__ = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 102 | 0 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Create train/eval DataLoaders for GLUE MRPC with a BERT tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""")
    datasets = load_dataset("""glue""", """mrpc""")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""], examples["""sentence2"""], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["""idx""", """sentence1""", """sentence2"""],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""", """labels""")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="""longest""",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="""pt""",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
        config["""num_epochs"""] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""])
    seed = int(config["""seed"""])
    batch_size = int(config["""batch_size"""])
    metric = evaluate.load("""glue""", """mrpc""")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["""labels"""]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}:', eval_metric)
def main():
    parser = argparse.ArgumentParser(description="""Simple example of training script.""")
    parser.add_argument(
        """--mixed_precision""",
        type=str,
        default=None,
        choices=["""no""", """fp16""", """bf16""", """fp8"""],
        help="""Whether to use mixed precision. Choose """
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10. """
        """and an Nvidia Ampere GPU.""",
    )
    # New Code #
    parser.add_argument(
        """--gradient_accumulation_steps""",
        type=int,
        default=1,
        help="""The number of minibatches to be run before gradients are accumulated.""",
    )
    parser.add_argument(
        """--local_sgd_steps""", type=int, default=8, help="""Number of local SGD steps or None to disable local SGD"""
    )
    parser.add_argument("""--cpu""", action="""store_true""", help="""If passed, will train on the CPU.""")
    args = parser.parse_args()
    config = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
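# Example launches (per the accelerate examples readme referenced above; the
# step counts are illustrative):
#   accelerate launch local_sgd.py --local_sgd_steps 8 --gradient_accumulation_steps 2
#   python local_sgd.py --cpu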
| 254 |
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __snake_case ( lowerCamelCase__ ):
__lowerCamelCase : Optional[int] = (KDPMaDiscreteScheduler,)
__lowerCamelCase : List[str] = 10
def UpperCAmelCase__ ( self , **snake_case__ ) -> str:
'''simple docstring'''
        config = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**snake_case__ )
return config
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=snake_case__ )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=snake_case__ , beta_end=snake_case__ )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=snake_case__ )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case__ )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
UpperCAmelCase : Optional[Any] =self.scheduler_classes[0]
UpperCAmelCase : Optional[int] =self.get_scheduler_config(prediction_type='''v_prediction''' )
UpperCAmelCase : Optional[Any] =scheduler_class(**snake_case__ )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase : str =self.dummy_model()
UpperCAmelCase : Optional[Any] =self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase : Union[str, Any] =sample.to(snake_case__ )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase : str =scheduler.scale_model_input(snake_case__ , snake_case__ )
UpperCAmelCase : Any =model(snake_case__ , snake_case__ )
UpperCAmelCase : Union[str, Any] =scheduler.step(snake_case__ , snake_case__ , snake_case__ )
UpperCAmelCase : int =output.prev_sample
UpperCAmelCase : Dict =torch.sum(torch.abs(snake_case__ ) )
UpperCAmelCase : Optional[Any] =torch.mean(torch.abs(snake_case__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.69_34e-07 ) < 1e-2
assert abs(result_mean.item() - 6.11_12e-10 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_93_42_86_50_17_09_72e-07 ) < 1e-2
assert abs(result_mean.item() - 0.0002 ) < 1e-3
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
if torch_device == "mps":
return
UpperCAmelCase : Any =self.scheduler_classes[0]
UpperCAmelCase : Optional[int] =self.get_scheduler_config()
UpperCAmelCase : Optional[Any] =scheduler_class(**snake_case__ )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase : Optional[int] =self.dummy_model()
UpperCAmelCase : Union[str, Any] =self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase : str =sample.to(snake_case__ )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase : Dict =scheduler.scale_model_input(snake_case__ , snake_case__ )
UpperCAmelCase : Union[str, Any] =model(snake_case__ , snake_case__ )
UpperCAmelCase : List[str] =scheduler.step(snake_case__ , snake_case__ , snake_case__ )
UpperCAmelCase : Optional[int] =output.prev_sample
UpperCAmelCase : Any =torch.sum(torch.abs(snake_case__ ) )
UpperCAmelCase : Union[str, Any] =torch.mean(torch.abs(snake_case__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4125 ) < 1e-2
assert abs(result_mean.item() - 0.0266 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1e-2
assert abs(result_mean.item() - 0.0266 ) < 1e-3
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
if torch_device == "mps":
return
UpperCAmelCase : List[Any] =self.scheduler_classes[0]
UpperCAmelCase : Dict =self.get_scheduler_config()
UpperCAmelCase : List[str] =scheduler_class(**snake_case__ )
scheduler.set_timesteps(self.num_inference_steps , device=snake_case__ )
UpperCAmelCase : int =self.dummy_model()
UpperCAmelCase : Tuple =self.dummy_sample_deter.to(snake_case__ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
UpperCAmelCase : Optional[Any] =scheduler.scale_model_input(snake_case__ , snake_case__ )
UpperCAmelCase : int =model(snake_case__ , snake_case__ )
UpperCAmelCase : str =scheduler.step(snake_case__ , snake_case__ , snake_case__ )
UpperCAmelCase : List[str] =output.prev_sample
UpperCAmelCase : List[str] =torch.sum(torch.abs(snake_case__ ) )
UpperCAmelCase : Dict =torch.mean(torch.abs(snake_case__ ) )
if str(snake_case__ ).startswith('''cpu''' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4125 ) < 1e-2
assert abs(result_mean.item() - 0.0266 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1e-2
assert abs(result_mean.item() - 0.0266 ) < 1e-3
| 348 | 0 |
import logging
import random

import ray

from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex


logger = logging.getLogger(__name__)


class RayRetriever:
    """A Ray actor that hosts a `RagRetriever` and serves retrieval requests."""

    def __init__(self ) -> None:
        self.initialized = False

    def create_rag_retriever(self , config , question_encoder_tokenizer , generator_tokenizer , index ) -> None:
        if not self.initialized:
            self.retriever = RagRetriever(
                config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
            self.initialized = True

    def init_retrieval(self ) -> None:
        self.retriever.index.init_index()

    def retrieve(self , question_hidden_states , n_docs ):
        doc_ids , retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states , n_docs )
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever ):
    """A `RagRetriever` that fans retrieval out to a set of Ray actor workers."""

    def __init__(self , config , question_encoder_tokenizer , generator_tokenizer , retrieval_workers , index=None ) -> None:
        if index is not None and index.is_initialized() and len(retrieval_workers ) > 0:
            raise ValueError(
                """When using Ray for distributed fine-tuning, """
                """you'll need to provide the paths instead, """
                """as the dataset and the index are loaded """
                """separately. More info in examples/rag/use_own_knowledge_dataset.py """ )
        super().__init__(
            config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers ) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config , question_encoder_tokenizer , generator_tokenizer , index )
                    for worker in self.retrieval_workers
                ] )

    def init_retrieval(self ) -> None:
        logger.info("""initializing retrieval""" )
        if len(self.retrieval_workers ) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self , question_hidden_states , n_docs ):
        if len(self.retrieval_workers ) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
            doc_ids , retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states , n_docs ) )
        else:
            doc_ids , retrieved_doc_embeds = self._main_retrieve(question_hidden_states , n_docs )
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids )

    @classmethod
    def get_tokenizers(cls , retriever_name_or_path , indexed_dataset=None , **kwargs ):
        return super(RagRayDistributedRetriever , cls ).get_tokenizers(retriever_name_or_path , indexed_dataset , **kwargs )

    @classmethod
    def from_pretrained(cls , retriever_name_or_path , actor_handles , indexed_dataset=None , **kwargs ):
        config = kwargs.pop("""config""" , None ) or RagConfig.from_pretrained(retriever_name_or_path , **kwargs )
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path , config=config )
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = """custom"""
            index = CustomHFIndex(config.retrieval_vector_size , indexed_dataset )
        else:
            index = cls._build_index(config )
        return cls(
            config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , retrieval_workers=actor_handles , index=index , )
| 306 |
import argparse
import os

import torch

from transformers import FlavaImageCodebook, FlavaImageCodebookConfig


def rreplace(s , old , new , occurrence ):
    """Replace the last `occurrence` occurrences of `old` in `s` with `new`."""
    li = s.rsplit(old , occurrence )
    return new.join(li )


def count_parameters(state_dict ):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )


def upgrade_state_dict(state_dict ):
    upgrade = {}

    group_keys = ["""group_1""", """group_2""", """group_3""", """group_4"""]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(F"{group_key}." , F"{group_key}.group." )

        if "res_path" in key:
            key = key.replace("""res_path.""" , """res_path.path.""" )

        if key.endswith(""".w""" ):
            key = rreplace(key , """.w""" , """.weight""" , 1 )
        if key.endswith(""".b""" ):
            key = rreplace(key , """.b""" , """.bias""" , 1 )

        upgrade[key] = value.float()

    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path=None , save_checkpoint=True ):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path ):
        ckpt = torch.load(checkpoint_path )
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path )

    if isinstance(ckpt , Encoder ):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt )

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path )
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config ).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict )
    hf_model.load_state_dict(hf_state_dict )

    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict )
    state_dict_count = count_parameters(state_dict )

    assert torch.allclose(hf_count , state_dict_count , atol=1e-3 )

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path )
    else:
        return hf_state_dict
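# Example invocation (paths are illustrative):
#   python convert_dalle_to_flava_codebook.py --checkpoint_path encoder.pkl \
#       --pytorch_dump_folder_path ./flava-codebook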
if __name__ == "__main__":
lowerCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
lowerCamelCase : Union[str, Any] = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 306 | 1 |
def remove_duplicates(key: str ) -> str:
    """Remove duplicate alphabetic characters from a keyword, keeping spaces."""
    key_no_dups = ''''''
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str ) -> dict:
    """Return a substitution map built from the (de-duplicated) keyword."""
    alphabet = [chr(i + 65 ) for i in range(26 )]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper() )
    offset = len(key )
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key )}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet ) , 26 ):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str , cipher_map: dict ) -> str:
    """Encipher message using the cipher map; unmapped characters pass through."""
    return "".join(cipher_map.get(ch , ch ) for ch in message.upper() )


def decipher(message: str , cipher_map: dict ) -> str:
    """Decipher message by inverting the cipher map."""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch , ch ) for ch in message.upper() )


def main() -> None:
    """Handle I/O for the cipher."""
    message = input('''Enter message to encode or decode: ''' ).strip()
    key = input('''Enter keyword: ''' ).strip()
    option = input('''Encipher or decipher? E/D:''' ).strip()[0].lower()
    try:
        func = {'''e''': encipher, '''d''': decipher}[option]
    except KeyError:
        raise KeyError('''invalid input option''' )
    cipher_map = create_cipher_map(key )
    print(func(message , cipher_map ) )
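# A round-trip sanity check; the keyword "Goodbye!!" and the message are
# illustrative, and the expected ciphertext follows from the mapping above.
assert encipher("Hello World!!" , create_cipher_map("Goodbye!!" ) ) == "CYJJM VMQJB!!"
assert decipher("CYJJM VMQJB!!" , create_cipher_map("Goodbye!!" ) ) == "HELLO WORLD!!"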
if __name__ == "__main__":
import doctest
doctest.testmod()
    main()
| 210 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several map-style or iterable datasets into a single dataset."""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError('''Unable to interleave an empty list of datasets.''')
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        '''is an empty dataset dictionary.''')
                raise ValueError(
                    F"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']")
            raise ValueError(
                F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.")
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.")
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(F"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)
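# A hedged usage sketch of the two combinators (the toy datasets are
# illustrative; `concatenate_datasets` is defined just below):
#   from datasets import Dataset
#   d1 = Dataset.from_dict({"a": [0, 1, 2]})
#   d2 = Dataset.from_dict({"a": [10, 11, 12]})
#   mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42)
#   combined = concatenate_datasets([d1, d2])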
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Concatenate several map-style or iterable datasets along rows or columns."""
    if not dsets:
        raise ValueError('''Unable to concatenate an empty list of datasets.''')
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        '''is an empty dataset dictionary.''')
                raise ValueError(
                    F"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']")
            raise ValueError(
                F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.")
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.")
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
| 210 | 1 |
"""simple docstring"""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__A = {
'/attention/': '/0/SelfAttention/',
'/self_attention/': '/0/SelfAttention/',
'/encoder_decoder_attention/': '/1/EncDecAttention/',
'value': 'v',
'query': 'q',
'key': 'k',
'out': 'o',
'pre_self_attention_layer_norm': '0/layer_norm',
'pre_cross_attention_layer_norm': '1/layer_norm',
'pre_attention_layer_norm': '0/layer_norm', # previously 1, but seems wrong
'token_embedder': 'shared',
'encoder_norm': 'final_layer_norm',
'decoder_norm': 'final_layer_norm',
'relpos_bias/rel_embedding': 'block/0/layer/0/SelfAttention/relative_attention_bias/weight',
'router/router_weights/w/': 'router/classifier/',
'roer/roer_weights/w/': 'router/classifier/',
'logits_dense': 'lm_head',
}
def rename_keys(s_dict ):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    keys = list(s_dict.keys() )
    for key in keys:
        layer_to_block_of_layer = R""".*/layers_(\d+)"""
        new_key = key
        if re.match(layer_to_block_of_layer , key ):
            new_key = re.sub(R"""layers_(\d+)""" , R"""block/\1/layer""" , new_key )

        layer_to_block_of_layer = R"""(encoder|decoder)\/"""

        if re.match(layer_to_block_of_layer , key ):
            groups = re.match(layer_to_block_of_layer , new_key ).groups()
            if groups[0] == "encoder":
                new_key = re.sub(R"""/mlp/""" , R"""/1/mlp/""" , new_key )
                new_key = re.sub(R"""/pre_mlp_layer_norm/""" , R"""/1/layer_norm/""" , new_key )

            elif groups[0] == "decoder":
                new_key = re.sub(R"""/mlp/""" , R"""/2/mlp/""" , new_key )
                new_key = re.sub(R"""/pre_mlp_layer_norm/""" , R"""/2/layer_norm/""" , new_key )

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key , temp_key )

        print(F'''{key} -> {new_key}''' )
        s_dict[new_key] = s_dict.pop(key )

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["""encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""] = s_dict[
            """encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
        ].T

    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["""decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""] = s_dict[
            """decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys() ):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts ):
                s_dict[key.replace("""expert/""" , F"experts.expert_{idx}/" )] = expert_weights[idx]
                print(F'''{key} -> {key.replace('expert/' , F"experts.expert_{idx}/" )}''' )

            s_dict.pop(key )

    return s_dict
__A = {
'NUM_ENCODER_LAYERS': 'num_layers',
'NUM_DECODER_LAYERS': 'num_decoder_layers',
'NUM_HEADS': 'num_heads',
'HEAD_DIM': 'd_kv',
'EMBED_DIM': 'd_model',
'MLP_DIM': 'd_ff',
'NUM_SELECTED_EXPERTS': 'num_selected_experts',
'NUM_ENCODER_SPARSE_LAYERS': 'num_sparse_encoder_layers',
'NUM_DECODER_SPARSE_LAYERS': 'num_sparse_decoder_layers',
'dense.MlpBlock.activations': 'feed_forward_proj',
}
def convert_gin_to_config(gin_file , num_experts ):
    # Convert a google style config to the hugging face format
    import regex as re

    with open(gin_file , """r""" ) as f:
        raw_gin = f.read()

    regex_match = re.findall(R"""(.*) = ([0-9.]*)""" , raw_gin )
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value ) if """.""" in value else int(value )

    activation = re.findall(R"""(.*activations) = \(\'(.*)\',\)""" , raw_gin )[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1] )

    args["""num_experts"""] = num_experts
    config = SwitchTransformersConfig(**args )
    return config
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase="./" , __UpperCamelCase=8 ) -> Tuple:
# Initialise PyTorch model
print(F'''Loading flax weights from : {flax_checkpoint_path}''' )
_lowerCAmelCase =checkpoints.load_tax_checkpoint(__UpperCamelCase )
if gin_file is not None:
_lowerCAmelCase =convert_gin_to_config(__UpperCamelCase , __UpperCamelCase )
else:
_lowerCAmelCase =SwitchTransformersConfig.from_pretrained(__UpperCamelCase )
_lowerCAmelCase =SwitchTransformersForConditionalGeneration(__UpperCamelCase )
_lowerCAmelCase =flax_params["""target"""]
_lowerCAmelCase =flatten_dict(__UpperCamelCase , sep="""/""" )
_lowerCAmelCase =rename_keys(__UpperCamelCase )
_lowerCAmelCase =unflatten_dict(__UpperCamelCase , sep="""/""" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(__UpperCamelCase , __UpperCamelCase )
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
pt_model.save_pretrained(__UpperCamelCase )
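# Example invocation (paths are illustrative; either --gin_file or --config_name
# must be supplied, per the parser below):
#   python convert_switch_transformers_checkpoint.py \
#       --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --gin_file /path/to/config.gin --pytorch_dump_folder_path ./switch-converted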
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'
' model architecture. If not provided, a `gin_file` has to be provided.'
),
)
parser.add_argument(
'--gin_file',
default=None,
type=str,
required=False,
help='Path to the gin config file. If not provided, a `config_file` has to be passed ',
)
parser.add_argument(
'--config_name', default=None, type=str, required=False, help='Config name of SwitchTransformers model.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output pytorch model.'
)
parser.add_argument('--num_experts', default=8, type=int, required=False, help='Number of experts')
    args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 341 |
"""simple docstring"""
def _lowerCamelCase(__UpperCamelCase ) -> Optional[Any]:
_lowerCAmelCase =0
_lowerCAmelCase =len(__UpperCamelCase )
for i in range(n - 1 ):
for j in range(i + 1 , __UpperCamelCase ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def _lowerCamelCase(__UpperCamelCase ) -> List[Any]:
if len(__UpperCamelCase ) <= 1:
return arr, 0
_lowerCAmelCase =len(__UpperCamelCase ) // 2
_lowerCAmelCase =arr[0:mid]
_lowerCAmelCase =arr[mid:]
_lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =_count_cross_inversions(__UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase =inversion_p + inversions_q + cross_inversions
return c, num_inversions
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Any:
_lowerCAmelCase =[]
_lowerCAmelCase =_lowerCAmelCase =_lowerCAmelCase =0
while i < len(__UpperCamelCase ) and j < len(__UpperCamelCase ):
if p[i] > q[j]:
# if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(__UpperCamelCase ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(__UpperCamelCase ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def _lowerCamelCase() -> str:
_lowerCAmelCase =[10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
_lowerCAmelCase =count_inversions_bf(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 8
print("""number of inversions = """ , __UpperCamelCase )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
_lowerCAmelCase =count_inversions_bf(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print("""number of inversions = """ , __UpperCamelCase )
# an empty list should also have zero inversions
_lowerCAmelCase =[]
_lowerCAmelCase =count_inversions_bf(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print("""number of inversions = """ , __UpperCamelCase )
if __name__ == "__main__":
main()
| 341 | 1 |
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
snake_case_ = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ):
A_ : str = XLNetTokenizer
A_ : Any = XLNetTokenizerFast
A_ : List[str] = True
A_ : int = True
def a (self : Dict ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
__snake_case = XLNetTokenizer(_a , keep_accents=_a )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def a (self : Tuple ):
"""simple docstring"""
__snake_case = """<s>"""
__snake_case = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def a (self : List[str] ):
"""simple docstring"""
__snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''<eod>''' )
self.assertEqual(len(_a ) , 1006 )
def a (self : Optional[Any] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = XLNetTokenizer(_a , keep_accents=_a )
__snake_case = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_a , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [285, 46, 10, 170, 382] )
__snake_case = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__snake_case = tokenizer.convert_tokens_to_ids(_a )
self.assertListEqual(_a , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
__snake_case = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = XLNetTokenizer(_a , do_lower_case=_a )
__snake_case = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + '''''',
'''i''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''▁he''', '''ll''', '''o'''] )
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = XLNetTokenizer(_a , do_lower_case=_a )
__snake_case = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
@slow
def a (self : Any ):
"""simple docstring"""
__snake_case = XLNetTokenizer.from_pretrained('''xlnet-base-cased''' )
__snake_case = tokenizer.encode('''sequence builders''' , add_special_tokens=_a )
__snake_case = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_a )
__snake_case = tokenizer.build_inputs_with_special_tokens(_a )
__snake_case = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def a (self : str ):
"""simple docstring"""
__snake_case = {"""input_ids""": [[17, 2_1442, 270, 17, 10, 1_4645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 2_2018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 1_4431, 13, 5500, 11, 1176, 580, 13, 1_6819, 4797, 23, 17, 10, 1_7135, 658, 19, 457, 7932, 13, 184, 19, 3154, 1_7135, 6468, 19, 1404, 1_2269, 19, 4229, 5356, 1_6264, 46, 19, 17, 2_0545, 1_0395, 9, 9, 9, 11, 28, 6421, 9531, 2_0729, 17, 10, 353, 1_7022, 11, 21, 6421, 9531, 1_6949, 17, 10, 1_1509, 753, 11, 33, 95, 2421, 7385, 956, 1_4431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 2_4738, 19, 1_3203, 658, 218, 787, 21, 430, 1_8482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 2_2178, 27, 1064, 22, 956, 13, 1_1101, 1429, 5854, 2_4313, 1_8953, 40, 422, 2_4366, 68, 1758, 37, 1_0483, 1_4257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 1_3894, 3380, 23, 95, 18, 1_7634, 2288, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name='''xlnet-base-cased''' , revision='''c841166438c31ec7ca9a106dee7bb312b73ae511''' , )
| 24 |
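The two assertions near the end of that test pin down XLNet's special-token layout; below is a minimal sketch of the pattern they imply (ids 4 and 3 are the sep/cls ids used in the test, treated here as given, and the function name is illustrative).

def build_xlnet_inputs(ids_a, ids_b=None, sep_id=4, cls_id=3):
    # single sequence: tokens + [sep, cls]; pair: a + [sep] + b + [sep, cls]
    if ids_b is None:
        return ids_a + [sep_id, cls_id]
    return ids_a + [sep_id] + ids_b + [sep_id, cls_id]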
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class __a :
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=2 , _a=24 , _a=16 , _a=True , _a=True , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=10 , _a=0.02 , _a=None , _a=2 , _a=2 , ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = parent
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE__ : str = max_length
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_mel_bins
SCREAMING_SNAKE_CASE__ : Optional[Any] = is_training
SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_labels
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE__ : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE__ : List[str] = num_attention_heads
SCREAMING_SNAKE_CASE__ : int = intermediate_size
SCREAMING_SNAKE_CASE__ : Tuple = hidden_act
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Tuple = initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] = scope
SCREAMING_SNAKE_CASE__ : List[str] = frequency_stride
SCREAMING_SNAKE_CASE__ : Union[str, Any] = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
SCREAMING_SNAKE_CASE__ : Optional[int] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
SCREAMING_SNAKE_CASE__ : Any = (self.max_length - self.patch_size) // self.time_stride + 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = frequency_out_dimension * time_out_dimension
SCREAMING_SNAKE_CASE__ : Any = num_patches + 2
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
SCREAMING_SNAKE_CASE__ : int = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_config()
return config, input_values, labels
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_a , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def _a ( self , _a , _a , _a ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = ASTModel(config=_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.prepare_config_and_inputs()
config , input_values , labels = config_and_inputs
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""input_values""": input_values}
return config, inputs_dict
@require_torch
class __a (UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[Any] = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE :Dict = (
{"""audio-classification""": ASTForAudioClassification, """feature-extraction""": ASTModel}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE :Union[str, Any] = False
_SCREAMING_SNAKE_CASE :Any = False
_SCREAMING_SNAKE_CASE :Union[str, Any] = False
_SCREAMING_SNAKE_CASE :Tuple = False
def _a ( self , _a , _a , _a , _a , _a ) -> Dict:
"""simple docstring"""
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def _a ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = ASTModelTester(self )
SCREAMING_SNAKE_CASE__ : str = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def _a ( self ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""AST does not use inputs_embeds""" )
def _a ( self ) -> List[str]:
"""simple docstring"""
pass
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : str = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Any = model_class(_a )
SCREAMING_SNAKE_CASE__ : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : Dict = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Dict = ["""input_values"""]
self.assertListEqual(arg_names[:1] , _a )
def _a ( self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
@slow
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = ASTModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def _lowercase ( ) -> int:
SCREAMING_SNAKE_CASE__ : List[Any] = hf_hub_download(
repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = torchaudio.load(__lowerCAmelCase )
return audio, sampling_rate
@require_torch
@require_torchaudio
class __a (unittest.TestCase):
'''simple docstring'''
@cached_property
def _a ( self ) -> int:
"""simple docstring"""
return (
ASTFeatureExtractor.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""" )
if is_torchaudio_available()
else None
)
@slow
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.default_feature_extractor
SCREAMING_SNAKE_CASE__ : Optional[Any] = ASTForAudioClassification.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""" ).to(_a )
SCREAMING_SNAKE_CASE__ : Dict = self.default_feature_extractor
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_audio()
SCREAMING_SNAKE_CASE__ : List[str] = audio.squeeze().numpy()
SCREAMING_SNAKE_CASE__ : List[str] = feature_extractor(_a , sampling_rate=_a , return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] = model(**_a )
# verify the logits
SCREAMING_SNAKE_CASE__ : List[Any] = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , _a )
SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor([-0.8_760, -7.0_042, -8.6_602] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
| 132 | 0 |
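The sequence-length arithmetic in the AST tester above is easy to verify by hand with its defaults (patch_size=2, num_mel_bins=16, max_length=24, both strides 2); the numbers below only illustrate the formula.

patch_size, num_mel_bins, max_length = 2, 16, 24
frequency_stride = time_stride = 2
frequency_out = (num_mel_bins - patch_size) // frequency_stride + 1  # 8
time_out = (max_length - patch_size) // time_stride + 1              # 12
seq_length = frequency_out * time_out + 2  # 96 patches + [CLS] + distillation token
assert (frequency_out, time_out, seq_length) == (8, 12, 98)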
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
UpperCAmelCase : List[Any] = None
UpperCAmelCase : int = logging.get_logger(__name__)
UpperCAmelCase : Optional[Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase : List[str] = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
},
"tokenizer_file": {
"google/bigbird-roberta-base": (
"https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
),
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase : Any = {
"google/bigbird-roberta-base": 4096,
"google/bigbird-roberta-large": 4096,
"google/bigbird-base-trivia-itc": 4096,
}
UpperCAmelCase : Optional[int] = "▁"
class SCREAMING_SNAKE_CASE__ ( a__ ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = BigBirdTokenizer
lowercase__ = ["input_ids", "attention_mask"]
lowercase__ = []
def __init__( self : List[Any] , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Any="<unk>" , lowerCAmelCase_ : Tuple="<s>" , lowerCAmelCase_ : Union[str, Any]="</s>" , lowerCAmelCase_ : Any="<pad>" , lowerCAmelCase_ : Dict="[SEP]" , lowerCAmelCase_ : int="[MASK]" , lowerCAmelCase_ : Union[str, Any]="[CLS]" , **lowerCAmelCase_ : List[Any] , ):
"""simple docstring"""
lowercase_ = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else bos_token
lowercase_ = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else eos_token
lowercase_ = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else unk_token
lowercase_ = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else pad_token
lowercase_ = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else cls_token
lowercase_ = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
lowercase_ = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else mask_token
super().__init__(
lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , **lowerCAmelCase_ , )
lowercase_ = vocab_file
lowercase_ = False if not self.vocab_file else True
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None):
"""simple docstring"""
lowercase_ = [self.sep_token_id]
lowercase_ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None , lowerCAmelCase_ : bool = False):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""")
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase_)) + [1]
return [1] + ([0] * len(lowerCAmelCase_)) + [1] + ([0] * len(lowerCAmelCase_)) + [1]
def _UpperCAmelCase ( self : Any , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None):
"""simple docstring"""
lowercase_ = [self.sep_token_id]
lowercase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""")
if not os.path.isdir(lowerCAmelCase_):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
return
lowercase_ = os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase_):
copyfile(self.vocab_file , lowerCAmelCase_)
return (out_vocab_file,)
| 370 |
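The mask and type-id methods above reduce to simple length arithmetic over a [CLS] A [SEP] or [CLS] A [SEP] B [SEP] layout; here is that logic in isolation, as a sketch over sequence lengths rather than a real tokenizer.

def special_tokens_mask(len_a, len_b=None):
    if len_b is None:
        return [1] + [0] * len_a + [1]
    return [1] + [0] * len_a + [1] + [0] * len_b + [1]


def token_type_ids(len_a, len_b=None):
    if len_b is None:
        return [0] * (len_a + 2)                  # cls + sequence + sep
    return [0] * (len_a + 2) + [1] * (len_b + 1)  # second sequence + its sep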
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
UpperCAmelCase : Tuple = logging.get_logger(__name__)
UpperCAmelCase : Optional[int] = {
"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "perceiver"
def __init__( self : Optional[int] , lowerCAmelCase_ : List[str]=2_5_6 , lowerCAmelCase_ : Dict=1_2_8_0 , lowerCAmelCase_ : List[Any]=7_6_8 , lowerCAmelCase_ : Optional[Any]=1 , lowerCAmelCase_ : List[Any]=2_6 , lowerCAmelCase_ : Optional[Any]=8 , lowerCAmelCase_ : Tuple=8 , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : Optional[Any]="kv" , lowerCAmelCase_ : Dict=1 , lowerCAmelCase_ : Optional[Any]=1 , lowerCAmelCase_ : List[str]="gelu" , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : List[Any]=1E-12 , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Union[str, Any]=2_6_2 , lowerCAmelCase_ : Union[str, Any]=2_0_4_8 , lowerCAmelCase_ : Any=5_6 , lowerCAmelCase_ : int=[3_6_8, 4_9_6] , lowerCAmelCase_ : Optional[int]=1_6 , lowerCAmelCase_ : Dict=1_9_2_0 , lowerCAmelCase_ : Optional[Any]=1_6 , lowerCAmelCase_ : Tuple=[1, 1_6, 2_2_4, 2_2_4] , **lowerCAmelCase_ : Union[str, Any] , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase_)
lowercase_ = num_latents
lowercase_ = d_latents
lowercase_ = d_model
lowercase_ = num_blocks
lowercase_ = num_self_attends_per_block
lowercase_ = num_self_attention_heads
lowercase_ = num_cross_attention_heads
lowercase_ = qk_channels
lowercase_ = v_channels
lowercase_ = cross_attention_shape_for_attention
lowercase_ = self_attention_widening_factor
lowercase_ = cross_attention_widening_factor
lowercase_ = hidden_act
lowercase_ = attention_probs_dropout_prob
lowercase_ = initializer_range
lowercase_ = layer_norm_eps
lowercase_ = use_query_residual
# masked language modeling attributes
lowercase_ = vocab_size
lowercase_ = max_position_embeddings
# image classification attributes
lowercase_ = image_size
# flow attributes
lowercase_ = train_size
# multimodal autoencoding attributes
lowercase_ = num_frames
lowercase_ = audio_samples_per_frame
lowercase_ = samples_per_patch
lowercase_ = output_shape
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
@property
def _UpperCAmelCase ( self : str):
"""simple docstring"""
if self.task == "multiple-choice":
lowercase_ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowercase_ = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""inputs""", dynamic_axis),
("""attention_mask""", dynamic_axis),
])
@property
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
return 1E-4
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Optional[TensorType] = None , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 4_0 , lowerCAmelCase_ : int = 4_0 , ):
"""simple docstring"""
if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase_ = compute_effective_axis_dimension(
lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase_ = preprocessor.num_special_tokens_to_add(lowerCAmelCase_)
lowercase_ = compute_effective_axis_dimension(
lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase_)
# Generate dummy inputs according to compute batch and sequence
lowercase_ = [""" """.join(["""a"""]) * seq_length] * batch_size
lowercase_ = dict(preprocessor(lowerCAmelCase_ , return_tensors=lowerCAmelCase_))
lowercase_ = inputs.pop("""input_ids""")
return inputs
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase_ = compute_effective_axis_dimension(lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch)
lowercase_ = self._generate_dummy_images(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = dict(preprocessor(images=lowerCAmelCase_ , return_tensors=lowerCAmelCase_))
lowercase_ = inputs.pop("""pixel_values""")
return inputs
else:
raise ValueError(
"""Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.""")
| 313 | 0 |
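For the text branch, the dummy-input generation above boils down to picking fixed sizes when a dynamic axis (-1) is requested and tokenizing a placeholder string. A simplified sketch, assuming the OnnxConfig defaults of 2 samples and 8 tokens that the inline comments mention; the helper name is illustrative.

def effective_dim(requested, fixed_default):
    # dynamic axes are encoded as -1 and replaced by a small fixed size
    return fixed_default if requested <= 0 else requested


batch_size = effective_dim(-1, 2)   # dynamic batch -> 2 samples
seq_length = effective_dim(-1, 8)   # dynamic sequence -> 8 tokens
dummy_text = [" ".join(["a"]) * seq_length] * batch_size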
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCamelCase, '''tf_padding''' ) )
self.parent.assertTrue(hasattr(lowerCamelCase, '''depth_multiplier''' ) )
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : List[str], lowerCamelCase : List[str], lowerCamelCase : Optional[Any]=13, lowerCamelCase : List[str]=3, lowerCamelCase : List[str]=32, lowerCamelCase : Union[str, Any]=0.25, lowerCamelCase : int=8, lowerCamelCase : Dict=True, lowerCamelCase : Optional[int]=1_024, lowerCamelCase : List[str]=32, lowerCamelCase : Optional[int]="relu6", lowerCamelCase : Optional[int]=0.1, lowerCamelCase : Optional[Any]=0.02, lowerCamelCase : List[Any]=True, lowerCamelCase : Any=True, lowerCamelCase : Dict=10, lowerCamelCase : Optional[int]=None, ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = depth_multiplier
lowercase__ = min_depth
lowercase__ = tf_padding
lowercase__ = int(last_hidden_size * depth_multiplier )
lowercase__ = output_stride
lowercase__ = hidden_act
lowercase__ = classifier_dropout_prob
lowercase__ = use_labels
lowercase__ = is_training
lowercase__ = num_labels
lowercase__ = initializer_range
lowercase__ = scope
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size], self.num_labels )
lowercase__ = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
lowercase__ = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowercase__ ( self : List[str] ):
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, min_depth=self.min_depth, tf_padding=self.tf_padding, hidden_act=self.hidden_act, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )
def lowercase__ ( self : List[str], lowerCamelCase : List[str], lowerCamelCase : Union[str, Any], lowerCamelCase : Tuple, lowerCamelCase : Tuple ):
'''simple docstring'''
lowercase__ = MobileNetVaModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = model(lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def lowercase__ ( self : str, lowerCamelCase : str, lowerCamelCase : List[str], lowerCamelCase : List[Any], lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = MobileNetVaForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = model(lowerCamelCase, labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( A__ ,A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
lowercase__ = (
{"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = MobileNetVaModelTester(self )
lowercase__ = MobileNetVaConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase )
def lowercase__ ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' )
def lowercase__ ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip(reason='''MobileNetV1 does not output attentions''' )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(lowerCamelCase )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1], lowerCamelCase )
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase : str, lowerCamelCase : Optional[int], lowerCamelCase : List[str] ):
lowercase__ = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
lowercase__ = outputs.hidden_states
lowercase__ = 26
self.assertEqual(len(lowerCamelCase ), lowerCamelCase )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ = True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
@slow
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = MobileNetVaModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def a ( ):
'''simple docstring'''
lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase__ ( self : str ):
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None
)
@slow
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(lowerCamelCase )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
lowercase__ = model(**lowerCamelCase )
# verify the logits
lowercase__ = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape, lowerCamelCase )
lowercase__ = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCamelCase, atol=1E-4 ) )
| 207 |
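A quick check of the shape assertion in that tester, assuming the defaults map as in the upstream MobileNetV1 tester (batch 13, image size 32, output stride 32, base width 1024 scaled by a depth multiplier of 0.25); the numbers are purely illustrative.

batch_size, image_size, output_stride = 13, 32, 32
last_hidden_size = int(1024 * 0.25)    # depth multiplier scales the base width -> 256
spatial = image_size // output_stride  # -> 1
expected_shape = (batch_size, last_hidden_size, spatial, spatial)
assert expected_shape == (13, 256, 1, 1)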
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : Dict, lowerCamelCase : pyspark.sql.DataFrame, lowerCamelCase : Optional[NamedSplit] = None, lowerCamelCase : Optional[Features] = None, lowerCamelCase : bool = True, lowerCamelCase : str = None, lowerCamelCase : bool = False, lowerCamelCase : str = None, lowerCamelCase : bool = True, lowerCamelCase : str = "arrow", **lowerCamelCase : str, ):
'''simple docstring'''
super().__init__(
split=lowerCamelCase, features=lowerCamelCase, cache_dir=lowerCamelCase, keep_in_memory=lowerCamelCase, streaming=lowerCamelCase, **lowerCamelCase, )
lowercase__ = load_from_cache_file
lowercase__ = file_format
lowercase__ = Spark(
df=lowerCamelCase, features=lowerCamelCase, cache_dir=lowerCamelCase, working_dir=lowerCamelCase, **lowerCamelCase, )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
lowercase__ = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=lowerCamelCase, file_format=self._file_format, )
return self.builder.as_dataset(split=self.split )
| 207 | 1 |
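A hedged usage sketch for the reader above; in recent `datasets` releases the public entry point wrapping it is `Dataset.from_spark`, but treat the exact API as an assumption.

from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([(1, "hello"), (2, "world")], ["id", "text"])
# ds = Dataset.from_spark(df)  # assumed public wrapper around the reader above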
"""simple docstring"""
import math
def UpperCamelCase_ ( lowerCAmelCase__ : float , lowerCAmelCase__ : float ) -> float:
"""simple docstring"""
if initial_intensity < 0:
raise ValueError('The value of intensity cannot be negative' )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError('In Malus Law, the angle is in the range 0-360 degrees' )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(lowerCAmelCase__ ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="""malus_law""")
| 289 |
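A quick numerical check of Malus's law as implemented above: a polarizer at 45 degrees passes exactly half the incoming intensity, since cos²(45°) = 1/2.

import math

assert math.isclose(100.0 * math.cos(math.radians(45)) ** 2, 50.0)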
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
lowercase__ : Optional[int] = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Dict = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
lowercase__ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 289 | 1 |
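The `_LazyModule` indirection above defers heavy imports until an attribute is first touched. A minimal sketch of the idea follows; the real implementation also handles `TYPE_CHECKING`, module specs, and caching, so this is illustrative only.

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # import the owning submodule only when one of its names is requested
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(attr)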
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : Optional[int] = logging.get_logger(__name__)
__A : Union[str, Any] = {
'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class __A ( lowerCamelCase__ ):
lowerCAmelCase_ : Optional[int] = "levit"
def __init__( self : Optional[int] , UpperCAmelCase_ : Dict=224 , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : int=3 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : int=16 , UpperCAmelCase_ : Union[str, Any]=[128, 256, 384] , UpperCAmelCase_ : Union[str, Any]=[4, 8, 12] , UpperCAmelCase_ : int=[4, 4, 4] , UpperCAmelCase_ : List[str]=[16, 16, 16] , UpperCAmelCase_ : str=0 , UpperCAmelCase_ : Union[str, Any]=[2, 2, 2] , UpperCAmelCase_ : List[Any]=[2, 2, 2] , UpperCAmelCase_ : Optional[Any]=0.02 , **UpperCAmelCase_ : List[Any] , ):
super().__init__(**__lowerCamelCase )
lowerCAmelCase : Optional[Any] = image_size
lowerCAmelCase : Dict = num_channels
lowerCAmelCase : Union[str, Any] = kernel_size
lowerCAmelCase : int = stride
lowerCAmelCase : Optional[Any] = padding
lowerCAmelCase : Optional[Any] = hidden_sizes
lowerCAmelCase : Optional[int] = num_attention_heads
lowerCAmelCase : Optional[Any] = depths
lowerCAmelCase : List[str] = key_dim
lowerCAmelCase : Optional[int] = drop_path_rate
lowerCAmelCase : Optional[Any] = patch_size
lowerCAmelCase : Dict = attention_ratio
lowerCAmelCase : Union[str, Any] = mlp_ratio
lowerCAmelCase : List[str] = initializer_range
lowerCAmelCase : List[str] = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class __A ( lowerCamelCase__ ):
lowerCAmelCase_ : List[Any] = version.parse("1.11" )
@property
def lowercase__ ( self : Dict ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowercase__ ( self : Dict ):
return 1E-4
| 138 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def lowerCAmelCase__ ( lowerCamelCase_ : str ,lowerCamelCase_ : Any):
'''simple docstring'''
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
lowerCAmelCase__ : Tuple = flax_key_tuple[:-1] + ('''weight''',)
lowerCAmelCase__ : int = torch.permute(lowerCamelCase_ ,(0, 2, 1))
elif flax_key_tuple[-1] == "kernel" and ".".join(lowerCamelCase_):
# linear layer
lowerCAmelCase__ : List[Any] = flax_key_tuple[:-1] + ('''weight''',)
lowerCAmelCase__ : str = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
lowerCAmelCase__ : Union[str, Any] = flax_key_tuple[:-1] + ('''weight''',)
return flax_key_tuple, flax_tensor
def lowerCAmelCase__ ( lowerCamelCase_ : Union[str, Any] ,lowerCamelCase_ : int ,lowerCamelCase_ : str):
'''simple docstring'''
if "metadata" in layer:
lowerCAmelCase__ : Optional[Any] = layer.split('''metadata''')
lowerCAmelCase__ : int = ''''''.join(split_layer[0])[:-1]
lowerCAmelCase__ : Optional[int] = [tuple(('''metadata''' + split_layer[1]).split('''/'''))]
elif "kvstore" in layer:
lowerCAmelCase__ : Optional[int] = layer.split('''kvstore''')
lowerCAmelCase__ : Optional[Any] = ''''''.join(split_layer[0])[:-1]
lowerCAmelCase__ : Tuple = [tuple(('''kvstore''' + split_layer[1]).split('''/'''))]
else:
lowerCAmelCase__ : List[str] = layer.split('''/''')
lowerCAmelCase__ : int = '''/'''.join(split_layer[:-1])
lowerCAmelCase__ : List[str] = (split_layer[-1],)
if "kvstore/path" in layer:
lowerCAmelCase__ : Optional[Any] = f"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
elif "kvstore/driver" in layer:
lowerCAmelCase__ : Dict = '''file'''
else:
lowerCAmelCase__ : Optional[Any] = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def lowerCAmelCase__ ( lowerCamelCase_ : List[str] ,lowerCamelCase_ : Optional[int]):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = rename_keys(lowerCamelCase_)
lowerCAmelCase__ : List[Any] = {}
for k, v in current_block.items():
lowerCAmelCase__ : List[Any] = v
lowerCAmelCase__ : Tuple = new_current_block
torch.save(lowerCamelCase_ ,lowerCamelCase_)
def lowerCAmelCase__ ( lowerCamelCase_ : Dict ,lowerCamelCase_ : Dict ,lowerCamelCase_ : Optional[int] ,lowerCamelCase_ : Dict ,lowerCamelCase_ : str = WEIGHTS_NAME):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = convert_file_size_to_int(lowerCamelCase_)
lowerCAmelCase__ : Dict = []
lowerCAmelCase__ : Optional[int] = {}
lowerCAmelCase__ : Optional[Any] = 0
lowerCAmelCase__ : List[str] = 0
os.makedirs(lowerCamelCase_ ,exist_ok=lowerCamelCase_)
with gfile.GFile(switch_checkpoint_path + '''/checkpoint''' ,'''rb''') as fp:
lowerCAmelCase__ : str = serialization.msgpack_restore(fp.read())['''optimizer''']['''target''']
lowerCAmelCase__ : int = flatten_dict(lowerCamelCase_ ,sep='''/''')
lowerCAmelCase__ : str = {}
for layer in checkpoint_info.keys():
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : int = get_key_and_tensorstore_dict(
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_)
if curr_real_layer_name in all_layers:
lowerCAmelCase__ : List[Any] = content
else:
lowerCAmelCase__ : str = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
lowerCAmelCase__ : str = ts.open(unflatten_dict(all_layers[key])).result().read().result()
lowerCAmelCase__ : str = torch.tensor(lowerCamelCase_)
lowerCAmelCase__ : Dict = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)
# use the renaming pattern from the small conversion scripts
lowerCAmelCase__ , lowerCAmelCase__ : int = rename_base_flax_keys(tuple(key.split('''/''')) ,lowerCamelCase_)
lowerCAmelCase__ : List[str] = '''/'''.join(lowerCamelCase_)
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
lowerCAmelCase__ : Union[str, Any] = os.path.join(
lowerCamelCase_ ,weights_name.replace('''.bin''' ,f"""-{len(lowerCamelCase_)+1:05d}-of-???.bin"""))
rename_and_save_block(lowerCamelCase_ ,lowerCamelCase_)
sharded_state_dicts.append(current_block.keys())
del current_block
lowerCAmelCase__ : str = {}
lowerCAmelCase__ : Union[str, Any] = 0
lowerCAmelCase__ : str = raw_weights.to(getattr(lowerCamelCase_ ,lowerCamelCase_))
current_block_size += weight_size
total_size += weight_size
# Add the last block
lowerCAmelCase__ : List[str] = os.path.join(lowerCamelCase_ ,weights_name.replace('''.bin''' ,f"""-{len(lowerCamelCase_)+1:05d}-of-???.bin"""))
rename_and_save_block(lowerCamelCase_ ,lowerCamelCase_)
sharded_state_dicts.append(current_block.keys())
# If we only have one shard, we return it
if len(lowerCamelCase_) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
lowerCAmelCase__ : Union[str, Any] = {}
lowerCAmelCase__ : Tuple = {}
for idx, shard in enumerate(lowerCamelCase_):
lowerCAmelCase__ : List[str] = weights_name.replace(
'''.bin''' ,f"""-{idx+1:05d}-of-{len(lowerCamelCase_):05d}.bin""") # len(sharded_state_dicts):05d}
lowerCAmelCase__ : Union[str, Any] = os.path.join(lowerCamelCase_ ,weights_name.replace('''.bin''' ,f"""-{idx+1:05d}-of-???.bin"""))
os.rename(lowerCamelCase_ ,os.path.join(lowerCamelCase_ ,lowerCamelCase_))
lowerCAmelCase__ : List[Any] = shard
for key in shard:
lowerCAmelCase__ : Dict = shard_file
# Add the metadata
lowerCAmelCase__ : Optional[Any] = {'''total_size''': total_size}
lowerCAmelCase__ : Optional[Any] = {'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(lowerCamelCase_ ,lowerCamelCase_) ,'''w''' ,encoding='''utf-8''') as f:
lowerCAmelCase__ : List[Any] = json.dumps(lowerCamelCase_ ,indent=2 ,sort_keys=lowerCamelCase_) + '''\n'''
f.write(lowerCamelCase_)
return metadata, index
if __name__ == "__main__":
__snake_case : List[str] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
__snake_case : Dict =parser.parse_args()
shard_on_the_fly(
args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def lowerCAmelCase__ ( ):
'''simple docstring'''
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
lowerCAmelCase__ : Optional[Any] = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''')
config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''')
lowerCAmelCase__ : Union[str, Any] = SwitchTransformersForConditionalGeneration.from_pretrained(
'''/home/arthur_huggingface_co/transformers/switch_converted''' ,device_map='''auto''')
lowerCAmelCase__ : Optional[Any] = TaTokenizer.from_pretrained('''t5-small''')
lowerCAmelCase__ : Any = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'''
lowerCAmelCase__ : Union[str, Any] = tokenizer(lowerCamelCase_ ,return_tensors='''pt''').input_ids
lowerCAmelCase__ : Tuple = model.generate(lowerCamelCase_ ,decoder_start_token_id=0)
print(tokenizer.decode(out[0]))
| 129 | 0 |
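The core of the sharding loop above, stripped of the tensorstore and key-renaming details: accumulate weights into a block until the next one would push it past the size limit, then cut a new shard. Byte sizes and names below are illustrative.

def shard_by_size(weight_sizes, max_shard_size):
    shards, current, current_size = [], [], 0
    for name, size in weight_sizes.items():
        if current and current_size + size > max_shard_size:
            shards.append(current)       # this weight would overflow the block
            current, current_size = [], 0
        current.append(name)
        current_size += size
    if current:
        shards.append(current)           # flush the last block
    return shards


assert shard_by_size({"a": 4, "b": 4, "c": 4}, 8) == [["a", "b"], ["c"]]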
'''simple docstring'''
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    # extended Euclidean algorithm: track Bézout coefficients alongside remainders
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
| 362 |
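Sanity check for the pair of functions above: 7 · 103 = 721 = 6 · 120 + 1, so 103 is the inverse of 7 modulo 120.

assert gcd(12, 18) == 6
assert find_mod_inverse(7, 120) == 103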
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class a__ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : List[str] = IFImgaImgSuperResolutionPipeline
_SCREAMING_SNAKE_CASE : int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'}
_SCREAMING_SNAKE_CASE : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'} )
_SCREAMING_SNAKE_CASE : Optional[int] = PipelineTesterMixin.required_optional_params - {'latents'}
def _lowerCamelCase ( self ):
"""simple docstring"""
return self._get_superresolution_dummy_components()
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase=0 ):
"""simple docstring"""
if str(_UpperCamelCase ).startswith("mps" ):
_lowercase : Tuple = torch.manual_seed(_UpperCamelCase )
else:
_lowercase : int = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
_lowercase : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
_lowercase : Optional[int] = floats_tensor((1, 3, 16, 16) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
_lowercase : List[str] = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _lowerCamelCase ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def _lowerCamelCase ( self ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def _lowerCamelCase ( self ):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def _lowerCamelCase ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def _lowerCamelCase ( self ):
"""simple docstring"""
self._test_save_load_local()
def _lowerCamelCase ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 199 | 0 |
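The seeding helper in that test class reflects a real constraint: MPS cannot host a device-local `torch.Generator`, so the test falls back to the global CPU generator. In isolation (the function name is illustrative):

import torch


def make_generator(device, seed=0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # global CPU generator; no MPS-local one here
    return torch.Generator(device=device).manual_seed(seed)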
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase : Optional[Any] =logging.get_logger(__name__)
UpperCAmelCase : List[Any] ={
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class _lowercase (a_ , a_ ):
'''simple docstring'''
lowercase__ = """swin"""
lowercase__ = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self , snake_case__=224 , snake_case__=4 , snake_case__=3 , snake_case__=96 , snake_case__=[2, 2, 6, 2] , snake_case__=[3, 6, 12, 24] , snake_case__=7 , snake_case__=4.0 , snake_case__=True , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.1 , snake_case__="gelu" , snake_case__=False , snake_case__=0.02 , snake_case__=1e-5 , snake_case__=32 , snake_case__=None , snake_case__=None , **snake_case__ , ):
'''simple docstring'''
super().__init__(**__a )
UpperCamelCase_ = image_size
UpperCamelCase_ = patch_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = embed_dim
UpperCamelCase_ = depths
UpperCamelCase_ = len(__a )
UpperCamelCase_ = num_heads
UpperCamelCase_ = window_size
UpperCamelCase_ = mlp_ratio
UpperCamelCase_ = qkv_bias
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = drop_path_rate
UpperCamelCase_ = hidden_act
UpperCamelCase_ = use_absolute_embeddings
UpperCamelCase_ = layer_norm_eps
UpperCamelCase_ = initializer_range
UpperCamelCase_ = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCamelCase_ = int(embed_dim * 2 ** (len(__a ) - 1) )
UpperCamelCase_ = ["stem"] + [F"""stage{idx}""" for idx in range(1 , len(__a ) + 1 )]
UpperCamelCase_ = get_aligned_output_features_output_indices(
out_features=__a , out_indices=__a , stage_names=self.stage_names )
class _lowercase (a_ ):
'''simple docstring'''
lowercase__ = version.parse("""1.11""" )
@property
def _lowerCamelCase ( self ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _lowerCamelCase ( self ):
'''simple docstring'''
return 1e-4
| 128 |
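The channel arithmetic near the end of the Swin config is worth spelling out: each stage doubles the embedding dimension, so with the defaults embed_dim=96 and depths=[2, 2, 6, 2] the final stage, and hence `hidden_size`, is 96 · 2³ = 768.

embed_dim, depths = 96, [2, 2, 6, 2]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
assert hidden_size == 768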
import argparse
from collections import defaultdict
import yaml
_snake_case = "docs/source/en/_toctree.yml"
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = defaultdict(_lowerCamelCase )
_lowerCAmelCase : Any = []
_lowerCAmelCase : List[str] = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({"local": doc["local"], "title": doc["title"]} )
else:
new_doc_list.append(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = new_doc_list
_lowerCAmelCase : List[Any] = [key for key, value in counts.items() if value > 1]
_lowerCAmelCase : str = []
for duplicate_key in duplicates:
_lowerCAmelCase : List[str] = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key} )
if len(_lowerCamelCase ) > 1:
raise ValueError(
F"{duplicate_key} is present several times in the documentation table of content at "
"`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
"others." )
# Only add this once
new_doc.append({"local": duplicate_key, "title": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1] )
_lowerCAmelCase : Optional[Any] = sorted(_lowerCamelCase , key=lambda _lowerCamelCase : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(_lowerCamelCase ) > 1:
raise ValueError("{doc_list} has two 'overview' docs which is not allowed." )
overview_doc.extend(_lowerCamelCase )
# Sort
return overview_doc
def A ( _lowerCamelCase=False ):
'''simple docstring'''
with open(_lowerCamelCase , encoding="utf-8" ) as f:
_lowerCAmelCase : int = yaml.safe_load(f.read() )
# Get to the API doc
_lowerCAmelCase : Optional[Any] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_lowerCAmelCase : List[str] = content[api_idx]["sections"]
# Then to the model doc
_lowerCAmelCase : Union[str, Any] = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
_lowerCAmelCase : Optional[Any] = api_doc[scheduler_idx]["sections"]
_lowerCAmelCase : Optional[Any] = clean_doc_toc(_lowerCamelCase )
_lowerCAmelCase : int = False
if new_scheduler_doc != scheduler_doc:
_lowerCAmelCase : List[Any] = True
if overwrite:
_lowerCAmelCase : Dict = new_scheduler_doc
if diff:
if overwrite:
_lowerCAmelCase : Tuple = api_doc
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(yaml.dump(_lowerCamelCase , allow_unicode=_lowerCamelCase ) )
else:
raise ValueError(
"The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
def check_pipeline_doc( overwrite=False ):
    '''simple docstring'''
    with open(PATH_TO_TOC , encoding="utf-8" ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]
    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1
    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []
    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc )
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc )
    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs )
    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs
if diff:
if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC , "w" , encoding="utf-8" ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
else:
            raise ValueError(
                "The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
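# Hedged usage sketch (added; the script path is an assumption, only the flag
# below is defined in the code above). Run from the repository root:
#
#   python utils/check_doc_toc.py                      # fail if the ToC is unsorted
#   python utils/check_doc_toc.py --fix_and_overwrite  # rewrite _toctree.yml in place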
| 36 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    '''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_resnet'''] = [
'''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ResNetForImageClassification''',
'''ResNetModel''',
'''ResNetPreTrainedModel''',
'''ResNetBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_resnet'''] = [
'''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFResNetForImageClassification''',
'''TFResNetModel''',
'''TFResNetPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_resnet'''] = [
'''FlaxResNetForImageClassification''',
'''FlaxResNetModel''',
'''FlaxResNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
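# Hedged aside (added, not part of the source): registering a `_LazyModule` in
# `sys.modules` is what lets `from transformers import ResNetModel` avoid
# importing torch until the symbol is first touched. A minimal stand-alone
# sketch of the same idea uses PEP 562's module-level `__getattr__`; the
# `lazy_symbols` mapping and the stdlib `json` stand-in are illustrative only:
#
#   import importlib
#
#   lazy_symbols = {"dumps": "json"}
#
#   def __getattr__(name):
#       if name in lazy_symbols:
#           return getattr(importlib.import_module(lazy_symbols[name]), name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")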
| 359 |
"""simple docstring"""
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = "." ):
    for dir_path, dir_names, filenames in os.walk(top_dir ):
        dir_names[:] = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
for filename in filenames:
if filename == "__init__.py":
continue
            if os.path.splitext(filename )[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path , filename ).lstrip('./' )
def md_prefix(i: int ):
return F'{i * " "}*' if i else "\n##"
def print_path(old_path: str , new_path: str ):
    old_parts = old_path.split(os.sep )
    for i, new_part in enumerate(new_path.split(os.sep ) ):
        if (i + 1 > len(old_parts ) or old_parts[i] != new_part) and new_part:
            print(F'{md_prefix(i )} {new_part.replace("_" , " " ).title()}' )
    return new_path
def print_directory_md(top_dir: str = "." ):
    old_path = ''
    for filepath in sorted(good_file_paths(top_dir ) ):
        filepath , filename = os.path.split(filepath )
        if filepath != old_path:
            old_path = print_path(old_path , filepath )
        indent = (filepath.count(os.sep ) + 1) if filepath else 0
        url = F'{filepath}/{filename}'.replace(' ' , '%20' )
        filename = os.path.splitext(filename.replace('_' , ' ' ).title() )[0]
        print(F'{md_prefix(indent )} [{filename}]({url})' )
if __name__ == "__main__":
print_directory_md('''.''')
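# Hedged example (added; the repository layout is hypothetical): for a tree
# containing `sorts/bubble_sort.py` and `sorts/merge_sort.py`, the script
# prints Markdown along the lines of:
#
#   ## Sorts
#     * [Bubble Sort](sorts/bubble_sort.py)
#     * [Merge Sort](sorts/merge_sort.py)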
| 309 | 0 |
'''simple docstring'''
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class _lowerCamelCase ( FeatureExtractionSavingTestMixin ):
'''simple docstring'''
    feature_extraction_class = None
    feat_extract_tester = None
@property
def __lowerCAmelCase ( self : Dict ) -> Any:
return self.feat_extract_tester.prepare_feat_extract_dict()
def __lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
__magic_name__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_A , 'feature_size' ) )
self.assertTrue(hasattr(_A , 'sampling_rate' ) )
self.assertTrue(hasattr(_A , 'padding_value' ) )
def __lowerCAmelCase ( self : Optional[int] ) -> Tuple:
__magic_name__ : List[Any] = self.feat_extract_tester.prepare_inputs_for_common()
__magic_name__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
__magic_name__ : Union[str, Any] = feat_extract.model_input_names[0]
__magic_name__ : int = BatchFeature({input_name: speech_inputs} )
        self.assertTrue(all(len(x ) == len(y ) for x, y in zip(speech_inputs , processed_features[input_name] ) ) )
__magic_name__ : List[str] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_A )
__magic_name__ : List[str] = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
__magic_name__ : Optional[int] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__magic_name__ : Any = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def __lowerCAmelCase ( self : Any ) -> List[str]:
__magic_name__ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_A )
__magic_name__ : str = self.feature_extraction_class(**self.feat_extract_dict )
__magic_name__ : Union[str, Any] = feat_extract.model_input_names[0]
__magic_name__ : int = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
__magic_name__ : int = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__magic_name__ : Optional[int] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def __lowerCAmelCase ( self : List[str] ) -> Tuple:
__magic_name__ : List[Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_A )
__magic_name__ : str = self.feature_extraction_class(**self.feat_extract_dict )
__magic_name__ : Optional[Any] = feat_extract.model_input_names[0]
__magic_name__ : int = BatchFeature({input_name: speech_inputs} , tensor_type='tf' )
__magic_name__ : Any = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__magic_name__ : Tuple = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def __lowerCAmelCase ( self : Dict , _A : Tuple=False ) -> List[str]:
        def _inputs_have_equal_length(input ):
            length = len(input[0] )
            for input_slice in input[1:]:
                if len(input_slice ) != length:
return False
return True
        def _inputs_are_equal(input_a , input_b ):
            if len(input_a ) != len(input_b ):
                return False
            for input_slice_a, input_slice_b in zip(input_a , input_b ):
                if not np.allclose(np.asarray(input_slice_a ) , np.asarray(input_slice_b ) , atol=1E-3 ):
return False
return True
__magic_name__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
__magic_name__ : int = self.feat_extract_tester.prepare_inputs_for_common(numpify=_A )
__magic_name__ : Tuple = feat_extract.model_input_names[0]
__magic_name__ : List[Any] = BatchFeature({input_name: speech_inputs} )
__magic_name__ : Dict = self.feat_extract_tester.seq_length_diff
__magic_name__ : str = self.feat_extract_tester.max_seq_length + pad_diff
__magic_name__ : Any = self.feat_extract_tester.min_seq_length
__magic_name__ : Optional[int] = self.feat_extract_tester.batch_size
__magic_name__ : List[str] = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
__magic_name__ : str = feat_extract.pad(_A , padding=_A )
__magic_name__ : List[Any] = input_a[input_name]
__magic_name__ : Dict = feat_extract.pad(_A , padding='longest' )
__magic_name__ : List[str] = input_a[input_name]
__magic_name__ : Optional[int] = feat_extract.pad(_A , padding='max_length' , max_length=len(speech_inputs[-1] ) )
__magic_name__ : Tuple = input_a[input_name]
__magic_name__ : str = feat_extract.pad(_A , padding='longest' , return_tensors='np' )
__magic_name__ : Tuple = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(_A ):
feat_extract.pad(_A , padding='max_length' )[input_name]
__magic_name__ : Dict = feat_extract.pad(
_A , padding='max_length' , max_length=_A , return_tensors='np' )
__magic_name__ : List[str] = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(_A ) )
self.assertTrue(_inputs_have_equal_length(_A ) )
self.assertTrue(_inputs_have_equal_length(_A ) )
self.assertTrue(_inputs_are_equal(_A , _A ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
__magic_name__ : List[Any] = feat_extract.pad(_A , pad_to_multiple_of=10 )
__magic_name__ : Optional[Any] = input_a[input_name]
__magic_name__ : List[str] = feat_extract.pad(_A , padding='longest' , pad_to_multiple_of=10 )
__magic_name__ : Optional[int] = input_a[input_name]
__magic_name__ : List[Any] = feat_extract.pad(
_A , padding='max_length' , pad_to_multiple_of=10 , max_length=_A )
__magic_name__ : List[Any] = input_a[input_name]
__magic_name__ : Dict = feat_extract.pad(
_A , padding='max_length' , pad_to_multiple_of=10 , max_length=_A , return_tensors='np' , )
__magic_name__ : Optional[Any] = input_a[input_name]
        self.assertTrue(all(len(x ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(_A , _A ) )
__magic_name__ : Optional[Any] = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(x ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
__magic_name__ : Optional[int] = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1E-3 )
def __lowerCAmelCase ( self : Union[str, Any] , _A : Dict=False ) -> Any:
        def _inputs_have_equal_length(input ):
            length = len(input[0] )
            for input_slice in input[1:]:
                if len(input_slice ) != length:
return False
return True
        def _inputs_are_equal(input_a , input_b ):
            if len(input_a ) != len(input_b ):
                return False
            for input_slice_a, input_slice_b in zip(input_a , input_b ):
                if not np.allclose(np.asarray(input_slice_a ) , np.asarray(input_slice_b ) , atol=1E-3 ):
return False
return True
__magic_name__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
__magic_name__ : Dict = self.feat_extract_tester.prepare_inputs_for_common(numpify=_A )
__magic_name__ : Optional[Any] = feat_extract.model_input_names[0]
__magic_name__ : Union[str, Any] = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
__magic_name__ : List[Any] = feat_extract.pad(
_A , padding='max_length' , max_length=len(speech_inputs[0] ) , truncation=_A )
__magic_name__ : int = input_a[input_name]
__magic_name__ : int = feat_extract.pad(_A , padding='max_length' , max_length=len(speech_inputs[0] ) )
__magic_name__ : List[str] = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_A ) )
self.assertFalse(_inputs_have_equal_length(_A ) )
# truncate to smallest with np
__magic_name__ : Optional[int] = feat_extract.pad(
_A , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' , truncation=_A , )
__magic_name__ : int = input_a[input_name]
__magic_name__ : Any = feat_extract.pad(
_A , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' )
__magic_name__ : int = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_A ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_A ) )
# truncate to middle
__magic_name__ : int = feat_extract.pad(
_A , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=_A , return_tensors='np' , )
__magic_name__ : Optional[Any] = input_a[input_name]
__magic_name__ : Optional[Any] = feat_extract.pad(
_A , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=_A )
__magic_name__ : Union[str, Any] = input_a[input_name]
__magic_name__ : Dict = feat_extract.pad(
_A , padding='max_length' , max_length=len(speech_inputs[1] ) , return_tensors='np' )
__magic_name__ : str = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(_A ) )
self.assertTrue(_inputs_have_equal_length(_A ) )
self.assertTrue(_inputs_are_equal(_A , _A ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_A ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_A ):
feat_extract.pad(_A , truncation=_A )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_A ):
feat_extract.pad(_A , padding='longest' , truncation=_A )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_A ):
feat_extract.pad(_A , padding='longest' , truncation=_A )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(_A ):
feat_extract.pad(_A , padding='max_length' , truncation=_A )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
__magic_name__ : Dict = 12
__magic_name__ : Tuple = feat_extract.pad(
_A , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_A , truncation=_A , )
__magic_name__ : List[Any] = input_a[input_name]
__magic_name__ : Optional[Any] = feat_extract.pad(
_A , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_A , )
__magic_name__ : Any = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
__magic_name__ : Optional[int] = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
__magic_name__ : int = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(_A ) )
self.assertFalse(_inputs_have_equal_length(_A ) )
def __lowerCAmelCase ( self : Dict ) -> List[Any]:
self._check_padding(numpify=_A )
def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
self._check_padding(numpify=_A )
def __lowerCAmelCase ( self : Dict ) -> List[str]:
self._check_truncation(numpify=_A )
def __lowerCAmelCase ( self : str ) -> List[Any]:
self._check_truncation(numpify=_A )
@require_torch
def __lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
__magic_name__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
__magic_name__ : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common()
__magic_name__ : List[str] = feat_extract.model_input_names[0]
__magic_name__ : Optional[int] = BatchFeature({input_name: speech_inputs} )
__magic_name__ : List[str] = feat_extract.pad(_A , padding='longest' , return_tensors='np' )[input_name]
__magic_name__ : Union[str, Any] = feat_extract.pad(_A , padding='longest' , return_tensors='pt' )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_pt.numpy().astype(np.float32 ).sum() ) < 1E-2 )
@require_tf
def __lowerCAmelCase ( self : Dict ) -> int:
__magic_name__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
__magic_name__ : int = self.feat_extract_tester.prepare_inputs_for_common()
__magic_name__ : Optional[Any] = feat_extract.model_input_names[0]
__magic_name__ : Union[str, Any] = BatchFeature({input_name: speech_inputs} )
__magic_name__ : str = feat_extract.pad(_A , padding='longest' , return_tensors='np' )[input_name]
__magic_name__ : Any = feat_extract.pad(_A , padding='longest' , return_tensors='tf' )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_tf.numpy().astype(np.float32 ).sum() ) < 1E-2 )
def __lowerCAmelCase ( self : List[Any] ) -> int:
__magic_name__ : List[Any] = self.feat_extract_dict
__magic_name__ : Any = True
__magic_name__ : Dict = self.feature_extraction_class(**_A )
__magic_name__ : int = self.feat_extract_tester.prepare_inputs_for_common()
        __magic_name__ : List[Any] = [len(x ) for x in speech_inputs]
__magic_name__ : Optional[int] = feat_extract.model_input_names[0]
__magic_name__ : Tuple = BatchFeature({input_name: speech_inputs} )
__magic_name__ : Dict = feat_extract.pad(_A , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , _A )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _A )
def __lowerCAmelCase ( self : str ) -> Dict:
__magic_name__ : str = self.feat_extract_dict
__magic_name__ : Tuple = True
__magic_name__ : Optional[int] = self.feature_extraction_class(**_A )
__magic_name__ : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common()
        __magic_name__ : List[Any] = [len(x ) for x in speech_inputs]
__magic_name__ : List[str] = feat_extract.model_input_names[0]
__magic_name__ : Union[str, Any] = BatchFeature({input_name: speech_inputs} )
__magic_name__ : Optional[int] = min(_A )
__magic_name__ : Union[str, Any] = feat_extract.pad(
_A , padding='max_length' , max_length=_A , truncation=_A , return_tensors='np' )
self.assertIn('attention_mask' , _A )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] ) | 331 |
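# Hedged illustration (added; every name below is hypothetical, not from the
# source): a concrete test file is expected to mix the class above into a real
# unittest case and point it at an actual feature extractor, e.g.:
#
#   class MyFeatureExtractionTest(_lowerCamelCase, unittest.TestCase):
#       feature_extraction_class = MyFeatureExtractor
#       def setUp(self):
#           self.feat_extract_tester = MyFeatureExtractionTester(self)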
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original( checkpoint_path , config_path , output_path ):
    config = OmegaConf.load(config_path )
    state_dict = torch.load(checkpoint_path, map_location="""cpu""" )["""model"""]
    keys = list(state_dict.keys() )
    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = """first_stage_model."""
    for key in keys:
        if key.startswith(first_stage_key ):
            first_stage_dict[key.replace(first_stage_key, """""" )] = state_dict[key]
    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = """model.diffusion_model."""
    for key in keys:
        if key.startswith(unet_key ):
            unet_state_dict[key.replace(unet_key, """""" )] = state_dict[key]
    vqvae_config = config.model.params.first_stage_config.params
    unet_config = config.model.params.unet_config.params
    vqvae = VQModel(**vqvae_config ).eval()
    vqvae.load_state_dict(first_stage_dict )
    unet = UNetLDMModel(**unet_config ).eval()
    unet.load_state_dict(unet_state_dict )
    scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps, beta_schedule="""scaled_linear""", beta_start=config.model.params.linear_start, beta_end=config.model.params.linear_end, clip_sample=False, )
    pipeline = LDMPipeline(vqvae, unet, scheduler )
    pipeline.save_pretrained(output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
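# Hedged usage sketch (added; file names and paths are placeholders):
#
#   python conversion_ldm_uncond.py \
#       --checkpoint_path ldm.ckpt \
#       --config_path ldm_config.yaml \
#       --output_path ./ldm_pipeline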
| 246 | 0 |
'''simple docstring'''
def __lowerCamelCase ( bin_string ) -> str:
    if not all(char in """01""" for char in bin_string ):
        raise ValueError("""Non-binary value was passed to the function""" )
    if not bin_string:
        raise ValueError("""Empty string was passed to the function""" )
    oct_string = """"""
    while len(bin_string ) % 3 != 0:
        bin_string = """0""" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string ) )
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group ):
            oct_val += int(2 ** (2 - index) * int(val ) )
        oct_string += str(oct_val )
    return oct_string
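# Worked example (added for illustration): "111100" splits into the groups
# "111" and "100", which map to 7 and 4, so the result is "74"; "101010"
# gives "101" -> 5 and "010" -> 2, i.e. "52".
assert __lowerCamelCase("""111100""" ) == "74"
assert __lowerCamelCase("""101010""" ) == "52"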
if __name__ == "__main__":
from doctest import testmod
testmod()
| 338 |
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = """Hello, World!"""
SAMPLE_LANGUAGE = """en_XX"""
def convert_xmod_checkpoint_to_pytorch( xmod_checkpoint_path , pytorch_dump_folder_path , classification_head ):
    data_dir = Path("""data_bin""" )
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path ).parent ) , checkpoint_file=Path(xmod_checkpoint_path ).name , _name="""xmod_base""" , arch="""xmod_base""" , task="""multilingual_masked_lm""" , data_name_or_path=str(data_dir ) , bpe="""sentencepiece""" , sentencepiece_model=str(Path(xmod_checkpoint_path ).parent / """sentencepiece.bpe.model""" ) , src_dict=str(data_dir / """dict.txt""" ) , )
    xmod.eval() # disable dropout
    print(xmod )
UpperCAmelCase : List[str] = xmod.model.encoder.sentence_encoder
UpperCAmelCase : Tuple = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , """bottleneck""" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
UpperCAmelCase : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our X-MOD config:""" , _lowercase )
UpperCAmelCase : str = XmodForSequenceClassification(_lowercase ) if classification_head else XmodForMaskedLM(_lowercase )
model.eval()
# Now let's copy all the weights.
# Embeddings
UpperCAmelCase : Union[str, Any] = xmod_sent_encoder.embed_tokens.weight
UpperCAmelCase : int = xmod_sent_encoder.embed_positions.weight
UpperCAmelCase : int = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
UpperCAmelCase : Union[str, Any] = xmod_sent_encoder.layernorm_embedding.weight
UpperCAmelCase : Optional[int] = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
UpperCAmelCase : List[str] = model.roberta.encoder.layer[i]
UpperCAmelCase : Optional[Any] = xmod_sent_encoder.layers[i]
# self attention
UpperCAmelCase : Optional[Any] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("""Dimensions of self-attention weights do not match.""" )
UpperCAmelCase : List[Any] = xmod_layer.self_attn.q_proj.weight
UpperCAmelCase : Optional[int] = xmod_layer.self_attn.q_proj.bias
UpperCAmelCase : Any = xmod_layer.self_attn.k_proj.weight
UpperCAmelCase : Optional[int] = xmod_layer.self_attn.k_proj.bias
UpperCAmelCase : int = xmod_layer.self_attn.v_proj.weight
UpperCAmelCase : List[Any] = xmod_layer.self_attn.v_proj.bias
# self-attention output
UpperCAmelCase : Optional[Any] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("""Dimensions of self-attention output weights do not match.""" )
UpperCAmelCase : Any = xmod_layer.self_attn.out_proj.weight
UpperCAmelCase : List[str] = xmod_layer.self_attn.out_proj.bias
UpperCAmelCase : int = xmod_layer.self_attn_layer_norm.weight
UpperCAmelCase : str = xmod_layer.self_attn_layer_norm.bias
# intermediate
UpperCAmelCase : Tuple = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("""Dimensions of intermediate weights do not match.""" )
        UpperCAmelCase : List[str] = xmod_layer.fc1.weight
        UpperCAmelCase : str = xmod_layer.fc1.bias
# output
UpperCAmelCase : Any = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("""Dimensions of feed-forward weights do not match.""" )
        UpperCAmelCase : Dict = xmod_layer.fc2.weight
        UpperCAmelCase : Dict = xmod_layer.fc2.bias
UpperCAmelCase : Any = xmod_layer.final_layer_norm.weight
UpperCAmelCase : Union[str, Any] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
UpperCAmelCase : str = xmod_layer.adapter_layer_norm.weight
UpperCAmelCase : List[str] = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("""Lists of language adapters do not match.""" )
for lang_code, adapter in xmod_layer.adapter_modules.items():
            UpperCAmelCase : List[Any] = bert_output.adapter_modules[lang_code]
            UpperCAmelCase : Dict = xmod_layer.adapter_modules[lang_code]
            UpperCAmelCase : Any = from_adapter.fc1.weight
            UpperCAmelCase : int = from_adapter.fc1.bias
            UpperCAmelCase : Dict = from_adapter.fc2.weight
            UpperCAmelCase : Dict = from_adapter.fc2.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
UpperCAmelCase : Tuple = xmod_sent_encoder.layer_norm.weight
UpperCAmelCase : List[Any] = xmod_sent_encoder.layer_norm.bias
if classification_head:
UpperCAmelCase : str = xmod.model.classification_heads["""mnli"""].dense.weight
UpperCAmelCase : Tuple = xmod.model.classification_heads["""mnli"""].dense.bias
UpperCAmelCase : str = xmod.model.classification_heads["""mnli"""].out_proj.weight
UpperCAmelCase : Tuple = xmod.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
UpperCAmelCase : Dict = xmod.model.encoder.lm_head.dense.weight
UpperCAmelCase : List[Any] = xmod.model.encoder.lm_head.dense.bias
UpperCAmelCase : Optional[Any] = xmod.model.encoder.lm_head.layer_norm.weight
UpperCAmelCase : List[Any] = xmod.model.encoder.lm_head.layer_norm.bias
UpperCAmelCase : str = xmod.model.encoder.lm_head.weight
UpperCAmelCase : str = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT ).unsqueeze(0 ) # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE )
    our_output = model(input_ids )[0]
    if classification_head:
        their_output = xmod.model.classification_heads["""mnli"""](xmod.extract_features(input_ids ) )
    else:
        their_output = xmod.model(input_ids , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
    print(F'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
    success = torch.allclose(our_output , their_output , atol=1e-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
    Path(pytorch_dump_folder_path ).mkdir(parents=True , exist_ok=True )
    print(F'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
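# Hedged usage sketch (added; the script file name and checkpoint path are
# placeholders, the three flags are the ones defined above):
#
#   python convert_xmod_checkpoint_to_pytorch.py \
#       --xmod_checkpoint_path ./xmod.base/model.pt \
#       --pytorch_dump_folder_path ./xmod-base \
#       --classification_head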
| 338 | 1 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
snake_case_ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-classification/requirements.txt""")
snake_case_ = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
snake_case_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader( path ):
    with open(path , 'rb' ) as f:
        im = Image.open(f )
        return im.convert('RGB' )
@dataclass
class DataTrainingArguments :
"""simple docstring"""
__UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"""help""": """Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."""
} , )
__UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
__UpperCamelCase = field(default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """A folder containing the training data."""} )
__UpperCamelCase = field(default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """A folder containing the validation data."""} )
__UpperCamelCase = field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
__UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def UpperCAmelCase__ ( self :Tuple ) -> str:
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'You must specify either a dataset name from the hub or a train and/or validation directory.' )
@dataclass
class ModelArguments :
"""simple docstring"""
__UpperCamelCase = field(
default="""google/vit-base-patch16-224-in21k""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , )
__UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(SCREAMING_SNAKE_CASE_ )} , )
__UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
__UpperCamelCase = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
__UpperCamelCase = field(default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Name or path of preprocessor config."""} )
__UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
__UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def collate_fn( examples ):
UpperCAmelCase = torch.stack([example['pixel_values'] for example in examples] )
UpperCAmelCase = torch.tensor([example['labels'] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def main( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_image_classification' , model_args , data_args )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='image-classification' , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files['train'] = os.path.join(data_args.train_dir , '**' )
        if data_args.validation_dir is not None:
            data_files['validation'] = os.path.join(data_args.validation_dir , '**' )
        dataset = load_dataset(
            'imagefolder' , data_files=data_files , cache_dir=model_args.cache_dir , task='image-classification' , )
# If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if 'validation' in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , lowercase_ ) and data_args.train_val_split > 0.0:
        split = dataset['train'].train_test_split(data_args.train_val_split )
        dataset['train'] = split['train']
        dataset['validation'] = split['test']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset['train'].features['labels'].names
    label2id , id2label = {}, {}
    for i, label in enumerate(labels ):
        label2id[label] = str(i )
        id2label[str(i )] = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(labels ) , label2id=label2id , id2label=id2label , finetuning_task='image-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
        size = image_processor.size['shortest_edge']
    else:
        size = (image_processor.size['height'], image_processor.size['width'])
    normalize = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
    _train_transforms = Compose(
        [
            RandomResizedCrop(size ),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ] )
    _val_transforms = Compose(
        [
            Resize(size ),
            CenterCrop(size ),
ToTensor(),
normalize,
] )
    def train_transforms(example_batch ):
        example_batch['pixel_values'] = [
            _train_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']
        ]
        return example_batch
    def val_transforms(example_batch ):
        example_batch['pixel_values'] = [_val_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']]
        return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
            dataset['train'] = (
dataset['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(lowercase_ )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
            dataset['validation'] = (
dataset['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(lowercase_ )
# Initalize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=dataset['train'] if training_args.do_train else None , eval_dataset=dataset['validation'] if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=image_processor , data_collator=collate_fn , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'image-classification',
'dataset': data_args.dataset_name,
'tags': ['image-classification', 'vision'],
}
if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
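# Hedged usage sketch (added; the dataset name and hyper-parameters are
# illustrative, not from the source):
#
#   python run_image_classification.py \
#       --dataset_name beans \
#       --output_dir ./vit-base-beans \
#       --do_train --do_eval \
#       --per_device_train_batch_size 8 \
#       --num_train_epochs 3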
| 78 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
lowercase : List[str] = logging.get_logger(__name__)
lowercase : Optional[Any] = {
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig( PretrainedConfig ):
_a : str= "gpt_neo"
_a : Optional[int]= ["past_key_values"]
_a : Dict= {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__( self ,vocab_size=50257 ,max_position_embeddings=2048 ,hidden_size=2048 ,num_layers=24 ,attention_types=[[["global", "local"], 12]] ,num_heads=16 ,intermediate_size=None ,window_size=256 ,activation_function="gelu_new" ,resid_dropout=0.0 ,embed_dropout=0.0 ,attention_dropout=0.0 ,classifier_dropout=0.1 ,layer_norm_epsilon=1e-5 ,initializer_range=0.02 ,use_cache=True ,bos_token_id=50256 ,eos_token_id=50256 ,**kwargs ,):
'''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.attention_layers)` == `config.num_layers` """
f"but is `len(config.attention_layers) = {len(self.attention_layers )}`, "
f"`config.num_layers = {self.num_layers}`. "
"""`config.attention_layers` is prepared using `config.attention_types`. """
"""Please verify the value of `config.attention_types` argument.""" )
        super().__init__(bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
@staticmethod
    def expand_attention_types_params( attention_types ):
        '''simple docstring'''
        attentions = []
        for item in attention_types:
            for _ in range(item[1] ):
                attentions.extend(item[0] )
        return attentions
def custom_unfold( input , dimension , size , step ):
    import torch
    shape = input.size()
    rank = len(shape )
    sizedim = shape[dimension]
    low_indices = torch.arange(0 , sizedim , step )
    min_length = torch.div(sizedim - size , step , rounding_mode="""floor""" ) + 1
    indices = torch.arange(size ) + low_indices[:min_length][:, None]
    s = [slice(None )] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0 , rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )
    return sliced.permute(perm )
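# Sanity example (added): the custom unfold should agree with the built-in
# torch.Tensor.unfold on a small input.
#
#   import torch
#   x = torch.arange(10).reshape(1, 10)
#   assert torch.equal(custom_unfold(x, 1, 2, 2), x.unfold(1, 2, 2))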
def custom_get_block_length_and_num_blocks( seq_length , window_size ):
    import torch
    candidates = torch.arange(1 , window_size )
    remainders = torch.remainder(seq_length , candidates )
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors )
    return largest_divisor, torch.div(seq_length , largest_divisor , rounding_mode="""floor""" )
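# Worked example (added): for seq_length=10 and window_size=4, the candidates
# are [1, 2, 3] and 10 is divisible by 1 and 2 only, so the block length is 2
# and the number of blocks is 10 // 2 = 5.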
class GPTNeoOnnxConfig( OnnxConfigWithPast ):
@property
    def inputs ( self ):
'''simple docstring'''
lowercase : Dict = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(snake_case ,direction="""inputs""" )
lowercase : Dict = {0: """batch""", 1: """past_sequence + sequence"""}
else:
lowercase : List[str] = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
    def num_attention_heads ( self ):
'''simple docstring'''
return self._config.num_heads
    def generate_dummy_inputs ( self ,tokenizer ,batch_size = -1 ,seq_length = -1 ,is_pair = False ,framework = None ,):
        '''simple docstring'''
        common_inputs = super().generate_dummy_inputs(
            tokenizer ,batch_size=batch_size ,seq_length=seq_length ,is_pair=is_pair ,framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowercase , lowercase : List[Any] = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowercase : Optional[int] = seqlen + 2
lowercase : int = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowercase : Optional[Any] = [
(torch.zeros(snake_case ), torch.zeros(snake_case )) for _ in range(self.num_layers )
]
lowercase : Optional[Any] = common_inputs["""attention_mask"""]
if self.use_past:
lowercase : Any = ordered_inputs["""attention_mask"""].dtype
lowercase : Union[str, Any] = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(snake_case ,snake_case ,dtype=snake_case )] ,dim=1 )
return ordered_inputs
@property
    def default_onnx_opset ( self ):
'''simple docstring'''
return 13
| 20 | 0 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution( t_limit: int = 1_000_000 , n_limit: int = 10) -> int:
    '''simple docstring'''
    count = defaultdict(int)
    for outer_width in range(3 , (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)) , 1)
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(f"{solution() = }") | 362 |
import qiskit
def quantum_entanglement( qubits : int = 2) -> qiskit.result.counts.Counts:
    '''simple docstring'''
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits)
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0)
    for i in range(1 , qubits):
# Adding CX (CNOT) gate
        circuit.cx(i - 1 , i)
# Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)) , list(range(classical_bits)))
# Now measuring any one qubit would affect other qubits to collapse
# their super position and have same state as the measured one.
# Executing the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=1_000)
    return job.result().get_counts(circuit)
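# Hedged note (added; counts are illustrative): for the default 2 qubits the
# circuit prepares a Bell state, so the 1_000 shots split roughly evenly
# between the two correlated outcomes, e.g. {'00': 505, '11': 495}.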
if __name__ == "__main__":
print(f"Total count for various states are: {quantum_entanglement(3)}") | 151 | 0 |
"""simple docstring"""
__a = "Tobias Carryer"
from time import time
class lowerCamelCase :
'''simple docstring'''
    def __init__( self , multiplier , increment , modulo , seed=int(time() ) ): # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed
    def next_number( self ):
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
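# Worked example (added): with the demo parameters below (a=1664525,
# c=1013904223, m=2**32) and seed=1, the first output is
# (1664525 * 1 + 1013904223) % 2**32 == 1015568748.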
if __name__ == "__main__":
# Show the LCG in action.
__a = LinearCongruentialGenerator(1_66_45_25, 10_13_90_42_23, 2 << 31)
while True:
print(lcg.next_number())
| 66 |
'''simple docstring'''
import math
def malus_law( initial_intensity , angle ):
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative" )
        # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees" )
        # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
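# Worked example (added): at a 60 degree analyzer angle cos^2(60°) = 0.25, so
# an initial intensity of 100.0 is attenuated to 25.0 (up to floating point):
# print(malus_law(100.0, 60))  # -> 25.000000000000004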
if __name__ == "__main__":
import doctest
doctest.testmod(name="malus_law")
| 139 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
snake_case_ : Optional[int] = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig( BackboneConfigMixin , PretrainedConfig ):
UpperCAmelCase__ : List[str] = '''swin'''
UpperCAmelCase__ : Any = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
    def __init__( self : Tuple , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.0_2 , layer_norm_eps=1e-5 , encoder_stride=32 , out_features=None , out_indices=None , **kwargs , ):
"""simple docstring"""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(depths) + 1)]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names)
class SwinOnnxConfig( OnnxConfig ):
UpperCAmelCase__ : Union[str, Any] = version.parse('''1.11''' )
@property
    def inputs ( self : Any):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
])
@property
    def atol_for_validation ( self : Optional[Any]):
"""simple docstring"""
return 1e-4
| 7 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
snake_case_ : int = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
snake_case_ : Union[str, Any] = direct_transformers_import(PATH_TO_TRANSFORMERS)
snake_case_ : Union[str, Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
snake_case_ : Union[str, Any] = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
# not used in modeling files, but it's important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
# used during training (even though we don't have a training script for these models yet)
"Mask2FormerConfig": ["ignore_value"],
# `ignore_value` used during training (even though we don't have a training script for these models yet)
# `norm` used in conversion script (despite not being used in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def A (__A : List[Any] , __A : Optional[int] , __A : str , __A : Dict ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F"""config.{attribute}""" in modeling_source
or F"""getattr(config, \"{attribute}\"""" in modeling_source
or F"""getattr(self.config, \"{attribute}\"""" in modeling_source
):
UpperCAmelCase_ = True
# Deal with multi-line cases
elif (
re.search(
RF"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" , __A , )
is not None
):
UpperCAmelCase_ = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
UpperCAmelCase_ = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
UpperCAmelCase_ = [
'''bos_index''',
'''eos_index''',
'''pad_index''',
'''unk_index''',
'''mask_index''',
'''image_size''',
'''use_cache''',
'''out_features''',
'''out_indices''',
]
UpperCAmelCase_ = ['''encoder_no_repeat_ngram_size''']
# Special cases to be allowed
UpperCAmelCase_ = True
if not attribute_used:
UpperCAmelCase_ = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
UpperCAmelCase_ = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
UpperCAmelCase_ = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
UpperCAmelCase_ = True
elif attribute.endswith('''_token_id''' ):
UpperCAmelCase_ = True
# configuration class specific cases
if not case_allowed:
UpperCAmelCase_ = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
UpperCAmelCase_ = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def A (__A : Tuple ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = dict(inspect.signature(config_class.__init__ ).parameters )
UpperCAmelCase_ = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']]
UpperCAmelCase_ = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
UpperCAmelCase_ = {}
if len(config_class.attribute_map ) > 0:
UpperCAmelCase_ = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
UpperCAmelCase_ = inspect.getsourcefile(__A )
UpperCAmelCase_ = os.path.dirname(__A )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
UpperCAmelCase_ = [os.path.join(__A , __A ) for fn in os.listdir(__A ) if fn.startswith('''modeling_''' )]
# Get the source code strings
UpperCAmelCase_ = []
for path in modeling_paths:
if os.path.isfile(__A ):
with open(__A ) as fp:
modeling_sources.append(fp.read() )
UpperCAmelCase_ = []
for config_param, default_value in zip(__A , __A ):
# `attributes` here is all the variant names for `config_param`
UpperCAmelCase_ = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(__A , __A , __A , __A ):
unused_attributes.append(attributes[0] )
return sorted(__A )
def A () -> Any:
"""simple docstring"""
UpperCAmelCase_ = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
UpperCAmelCase_ = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda __A : inspect.isclass(__A )
and issubclass(__A , __A )
and inspect.getmodule(__A ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
UpperCAmelCase_ = check_config_attributes_being_used(__A )
if len(__A ) > 0:
UpperCAmelCase_ = unused_attributes
if len(__A ) > 0:
UpperCAmelCase_ = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n'''
for name, attributes in configs_with_unused_attributes.items():
error += F"""{name}: {attributes}\n"""
raise ValueError(__A )
if __name__ == "__main__":
check_config_attributes()
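# Illustrative check (not part of the script) of the multi-line `getattr`
# regex used in the attribute-usage detection above: it should match even
# when the call is wrapped across several lines.
import re

attribute = "hidden_size"  # hypothetical attribute name
pattern = rf"getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\""
source = 'x = getattr(\n    self.config,\n    "hidden_size",\n    None,\n)'
assert re.search(pattern, source) is not None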
| 7 | 1 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
lowercase_ = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
lowercase_ = (
subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode('utf-8').split()
)
lowercase_ = '|'.join(sys.argv[1:])
lowercase_ = re.compile(Rf"^({joined_dirs}).*?\.py$")
lowercase_ = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
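# Sketch of the filtering behaviour above (file names are hypothetical):
# invoked as `python utils/get_modified_files.py utils src tests examples`,
# only .py files under the listed top-level dirs survive the regex.
import re

joined = "|".join(["utils", "src", "tests", "examples"])
rx = re.compile(rf"^({joined}).*?\.py$")
assert rx.match("src/transformers/models/bert/modeling_bert.py")
assert not rx.match("docs/source/index.mdx")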
| 205 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
lowercase_ = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(4_2)
lowercase_ = 'sshleifer/student_marian_en_ro_6_1'
lowercase_ = 'sshleifer/tiny-mbart'
@require_torch
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE ):
def A__ ( self , lowerCAmelCase=False , lowerCAmelCase=None , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , ) -> Dict:
'''simple docstring'''
_lowercase =self.run_trainer(
eval_steps=1 , max_len=12 , model_name=lowerCAmelCase , num_train_epochs=1 , distributed=lowerCAmelCase , extra_args_str=lowerCAmelCase , predict_with_generate=lowerCAmelCase , do_train=lowerCAmelCase , do_eval=lowerCAmelCase , do_predict=lowerCAmelCase , )
_lowercase =TrainerState.load_from_json(os.path.join(lowerCAmelCase , 'trainer_state.json' ) ).log_history
if not do_eval:
return
_lowercase =[log for log in logs if 'eval_loss' in log.keys()]
_lowercase =eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
_lowercase =eval_metrics[-1]
assert isinstance(last_step_stats['eval_bleu'] , lowerCAmelCase )
assert not math.isnan(float(last_step_stats['eval_loss'] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
self.run_seqaseq_quick()
@require_torch_multi_gpu
def A__ ( self ) -> Tuple:
'''simple docstring'''
self.run_seqaseq_quick(distributed=lowerCAmelCase )
@require_torch_multi_gpu
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
self.run_seqaseq_quick(distributed=lowerCAmelCase )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
self.run_seqaseq_quick(distributed=lowerCAmelCase , extra_args_str='--sharded_ddp simple' )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
self.run_seqaseq_quick(distributed=lowerCAmelCase , extra_args_str='--sharded_ddp simple --fp16' )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def A__ ( self ) -> Dict:
'''simple docstring'''
self.run_seqaseq_quick(distributed=lowerCAmelCase , extra_args_str='--sharded_ddp zero_dp_2' , predict_with_generate=lowerCAmelCase )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
self.run_seqaseq_quick(
distributed=lowerCAmelCase , extra_args_str='--sharded_ddp zero_dp_2 --fp16' , predict_with_generate=lowerCAmelCase )
@require_apex
@require_torch_gpu
def A__ ( self ) -> List[Any]:
'''simple docstring'''
self.run_seqaseq_quick(distributed=lowerCAmelCase , extra_args_str='--fp16 --fp16_backend=apex' )
# test a 2nd time - was getting eval_loss: nan
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=lowerCAmelCase , extra_args_str='--fp16 --fp16_backend=apex' )
@parameterized.expand(['base', 'low', 'high', 'mixed'] )
@require_torch_multi_gpu
def A__ ( self , lowerCAmelCase ) -> Any:
'''simple docstring'''
_lowercase ={
# test with the default log_level - should be info and thus log info once
'base': {'extra_args_str': '', 'n_matches': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'low': {'extra_args_str': '--log_level debug --log_level_replica debug', 'n_matches': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'high': {'extra_args_str': '--log_level error --log_level_replica debug', 'n_matches': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'mixed': {'extra_args_str': '--log_level error --log_level_replica error', 'n_matches': 0},
}
_lowercase =experiments[experiment_id]
_lowercase ={'distributed': True, 'predict_with_generate': False, 'do_eval': False, 'do_predict': False}
_lowercase ='Running training'
with CaptureStderr() as cl:
self.run_seqaseq_quick(**lowerCAmelCase , extra_args_str=data['extra_args_str'] )
_lowercase =len(re.findall(lowerCAmelCase , cl.err ) )
self.assertEqual(lowerCAmelCase , data['n_matches'] )
@slow
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
_lowercase =self.run_trainer(
eval_steps=2 , max_len=128 , model_name=lowerCAmelCase , learning_rate=3e-4 , num_train_epochs=10 , distributed=lowerCAmelCase , )
# Check metrics
_lowercase =TrainerState.load_from_json(os.path.join(lowerCAmelCase , 'trainer_state.json' ) ).log_history
_lowercase =[log for log in logs if 'eval_loss' in log.keys()]
_lowercase =eval_metrics[0]
_lowercase =eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['eval_bleu'] , lowerCAmelCase )
# test if do_predict saves generations and metrics
_lowercase =os.listdir(lowerCAmelCase )
_lowercase ={os.path.basename(lowerCAmelCase ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def A__ ( self ) -> List[str]:
'''simple docstring'''
from transformers.training_args import OptimizerNames
def train_and_return_metrics(lowerCAmelCase ) -> Tuple[int, float]:
_lowercase ='--skip_memory_metrics 0'
_lowercase =self.run_trainer(
max_len=128 , model_name=lowerCAmelCase , learning_rate=3e-4 , num_train_epochs=1 , optim=lowerCAmelCase , distributed=lowerCAmelCase , extra_args_str=lowerCAmelCase , do_eval=lowerCAmelCase , do_predict=lowerCAmelCase , n_gpus_to_use=1 , )
# Check metrics
_lowercase =TrainerState.load_from_json(Path(lowerCAmelCase , 'trainer_state.json' ) ).log_history
_lowercase =int(logs[0]['train_mem_gpu_peaked_delta'] / 2**20 )
_lowercase =int(logs[0]['train_mem_gpu_alloc_delta'] / 2**20 )
_lowercase =logs[0]['train_loss']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
_lowercase , _lowercase , _lowercase =train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
_lowercase , _lowercase , _lowercase =train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
_lowercase =gpu_alloc_mem_orig - gpu_alloc_mem_bnb
_lowercase =gpu_peak_mem_orig + gpu_alloc_mem_orig
_lowercase =gpu_peak_mem_bnb + gpu_alloc_mem_bnb
_lowercase =gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as follows:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate differences between GPUs, let's check
# that we have at least 120MB in savings
_lowercase =120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
lowerCAmelCase , lowerCAmelCase , 'should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'
F''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
F''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
self.assertGreater(
lowerCAmelCase , lowerCAmelCase , 'should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'
F''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
F''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
self.assertEqual(
lowerCAmelCase , lowerCAmelCase , F'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 3e-3 , lowerCAmelCase = "adafactor" , lowerCAmelCase = False , lowerCAmelCase = None , lowerCAmelCase = 0 , lowerCAmelCase = True , lowerCAmelCase = True , lowerCAmelCase = True , lowerCAmelCase = True , lowerCAmelCase = None , ) -> Optional[Any]:
'''simple docstring'''
_lowercase =self.test_file_dir / '../fixtures/tests_samples/wmt_en_ro'
_lowercase =self.get_auto_remove_tmp_dir()
_lowercase =F'''
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(lowerCAmelCase )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(lowerCAmelCase )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
'''.split()
_lowercase =F'''
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(lowerCAmelCase )}
'''.split()
_lowercase ='\n --do_predict\n '.split()
_lowercase =[]
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
_lowercase =get_gpu_count()
_lowercase =get_torch_dist_unique_port()
_lowercase =F'''
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
_lowercase =[sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowerCAmelCase , env=self.get_env() )
else:
_lowercase =['run_translation.py'] + args
with patch.object(lowerCAmelCase , 'argv' , lowerCAmelCase ):
main()
return output_dir
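# Back-of-the-envelope check (illustrative) of the ~150MB optimizer-memory
# saving derived in the comments above; the parameter count is the 25M
# approximation quoted there, not a measured value.
quantized_params = 25_000_000
adamw_bytes, bnb_bytes = 8, 2  # optimizer-state bytes per parameter
saved_mib = quantized_params * (adamw_bytes - bnb_bytes) / 2**20
assert 140 < saved_mib < 150  # ~143 MiB, i.e. the "~150MB" ballpark above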
| 205 | 1 |
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Union[str, Any] = abs(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: Any = 0
while n > 0:
res += n % 10
n //= 10
return res
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Optional[Any] = abs(_UpperCAmelCase )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def A_ ( _UpperCAmelCase ):
return sum(int(_UpperCAmelCase ) for c in str(abs(_UpperCAmelCase ) ) )
def A_ ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_UpperCAmelCase , _UpperCAmelCase ) -> None:
SCREAMING_SNAKE_CASE_: str = f"{func.__name__}({value})"
SCREAMING_SNAKE_CASE_: Any = timeit(f"__main__.{call}" , setup="import __main__" )
print(f"{call:56} = {func(_UpperCAmelCase )} -- {timing:.4f} seconds" )
for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(_UpperCAmelCase , _UpperCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
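# Quick sanity check (illustrative; uses the original function names referenced
# by the benchmark above): 262144 == 2**18 and 2+6+2+1+4+4 == 19, so all three
# digit-sum variants should agree.
assert sum_of_digits(262144) == sum_of_digits_recursion(262144) == sum_of_digits_compact(262144) == 19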
| 127 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowercase ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : Dict = StableDiffusionInpaintPipeline
_UpperCAmelCase : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
_UpperCAmelCase : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_UpperCAmelCase : Tuple = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_UpperCAmelCase : Optional[int] = frozenset([] )
def _SCREAMING_SNAKE_CASE ( self : int):
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_: Optional[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE_: Optional[Any] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__)
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_: Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_: Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
SCREAMING_SNAKE_CASE_: Optional[int] = CLIPTextModel(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
SCREAMING_SNAKE_CASE_: List[str] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[int]=0):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
SCREAMING_SNAKE_CASE_: Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = image.cpu().permute(0 , 2 , 3 , 1)[0]
SCREAMING_SNAKE_CASE_: Tuple = Image.fromarray(np.uinta(lowerCAmelCase__)).convert("RGB").resize((64, 64))
SCREAMING_SNAKE_CASE_: List[str] = Image.fromarray(np.uinta(image + 4)).convert("RGB").resize((64, 64))
if str(lowerCAmelCase__).startswith("mps"):
SCREAMING_SNAKE_CASE_: Tuple = torch.manual_seed(lowerCAmelCase__)
else:
SCREAMING_SNAKE_CASE_: Any = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"image": init_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_: int = self.get_dummy_components()
SCREAMING_SNAKE_CASE_: int = StableDiffusionInpaintPipeline(**lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = sd_pipe.to(lowerCAmelCase__)
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = self.get_dummy_inputs(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = sd_pipe(**lowerCAmelCase__).images
SCREAMING_SNAKE_CASE_: Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE_: Tuple = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def _SCREAMING_SNAKE_CASE ( self : List[str]):
super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : str):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_: Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png")
SCREAMING_SNAKE_CASE_: int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
SCREAMING_SNAKE_CASE_: Optional[int] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench.npy")
SCREAMING_SNAKE_CASE_: List[str] = "stabilityai/stable-diffusion-2-inpainting"
SCREAMING_SNAKE_CASE_: Any = StableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase__ , safety_checker=lowerCAmelCase__)
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_: str = "Face of a yellow cat, high resolution, sitting on a park bench"
SCREAMING_SNAKE_CASE_: Optional[int] = torch.manual_seed(0)
SCREAMING_SNAKE_CASE_: Optional[int] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , generator=lowerCAmelCase__ , output_type="np" , )
SCREAMING_SNAKE_CASE_: Optional[Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 9E-3
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png")
SCREAMING_SNAKE_CASE_: List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
SCREAMING_SNAKE_CASE_: Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench_fp16.npy")
SCREAMING_SNAKE_CASE_: str = "stabilityai/stable-diffusion-2-inpainting"
SCREAMING_SNAKE_CASE_: Dict = StableDiffusionInpaintPipeline.from_pretrained(
lowerCAmelCase__ , torch_dtype=torch.floataa , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_: List[str] = "Face of a yellow cat, high resolution, sitting on a park bench"
SCREAMING_SNAKE_CASE_: Tuple = torch.manual_seed(0)
SCREAMING_SNAKE_CASE_: Dict = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , generator=lowerCAmelCase__ , output_type="np" , )
SCREAMING_SNAKE_CASE_: Any = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 5E-1
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE_: Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png")
SCREAMING_SNAKE_CASE_: Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
SCREAMING_SNAKE_CASE_: List[str] = "stabilityai/stable-diffusion-2-inpainting"
SCREAMING_SNAKE_CASE_: Tuple = PNDMScheduler.from_pretrained(lowerCAmelCase__ , subfolder="scheduler")
SCREAMING_SNAKE_CASE_: Any = StableDiffusionInpaintPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , torch_dtype=torch.floataa , )
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE_: Any = "Face of a yellow cat, high resolution, sitting on a park bench"
SCREAMING_SNAKE_CASE_: Any = torch.manual_seed(0)
SCREAMING_SNAKE_CASE_: Union[str, Any] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , )
SCREAMING_SNAKE_CASE_: Optional[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
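# Note on the `in_channels=9` configured for the inpainting UNet above
# (illustrative arithmetic): the pipeline concatenates 4 noisy latent channels,
# 4 masked-image latent channels and 1 downsampled mask channel, so
assert 4 + 4 + 1 == 9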
| 127 | 1 |
'''simple docstring'''
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case_ (_a : Dict , _a : str , _a : Optional[Any] , _a : Union[str, Any]="attention" ):
UpperCAmelCase = params[F"{prefix}/layers_{i}/{layer_name}/key/kernel"]
UpperCAmelCase = params[F"{prefix}/layers_{i}/{layer_name}/out/kernel"]
UpperCAmelCase = params[F"{prefix}/layers_{i}/{layer_name}/query/kernel"]
UpperCAmelCase = params[F"{prefix}/layers_{i}/{layer_name}/value/kernel"]
return k, o, q, v
def snake_case_ (_a : int , _a : Dict , _a : Dict , _a : int=False ):
if split_mlp_wi:
UpperCAmelCase = params[F"{prefix}/layers_{i}/mlp/wi_0/kernel"]
UpperCAmelCase = params[F"{prefix}/layers_{i}/mlp/wi_1/kernel"]
UpperCAmelCase = (wi_a, wi_a)
else:
UpperCAmelCase = params[F"{prefix}/layers_{i}/mlp/wi/kernel"]
UpperCAmelCase = params[F"{prefix}/layers_{i}/mlp/wo/kernel"]
return wi, wo
def snake_case_ (_a : str , _a : str , _a : Optional[Any] , _a : Dict ):
return params[F"{prefix}/layers_{i}/{layer_name}/scale"]
def snake_case_ (_a : Optional[int] , *, _a : int , _a : Optional[int] ):
UpperCAmelCase = traverse_util.flatten_dict(variables['''target'''] )
UpperCAmelCase = {'''/'''.join(snake_case__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
UpperCAmelCase = '''encoder/layers_0/mlp/wi_0/kernel''' in old
print('''Split MLP:''' , snake_case__ )
UpperCAmelCase = collections.OrderedDict()
# Shared embeddings.
UpperCAmelCase = old['''token_embedder/embedding''']
# Encoder.
for i in range(snake_case__ ):
# Block i, layer 0 (Self Attention).
UpperCAmelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , '''encoder''' , '''pre_attention_layer_norm''' )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = tax_attention_lookup(snake_case__ , snake_case__ , '''encoder''' , '''attention''' )
UpperCAmelCase = layer_norm
UpperCAmelCase = k.T
UpperCAmelCase = o.T
UpperCAmelCase = q.T
UpperCAmelCase = v.T
# Block i, layer 1 (MLP).
UpperCAmelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , '''encoder''' , '''pre_mlp_layer_norm''' )
UpperCAmelCase , UpperCAmelCase = tax_mlp_lookup(snake_case__ , snake_case__ , '''encoder''' , snake_case__ )
UpperCAmelCase = layer_norm
if split_mlp_wi:
UpperCAmelCase = wi[0].T
UpperCAmelCase = wi[1].T
else:
UpperCAmelCase = wi.T
UpperCAmelCase = wo.T
UpperCAmelCase = old[
'''encoder/relpos_bias/rel_embedding'''
].T
UpperCAmelCase = old['''encoder/encoder_norm/scale''']
if not is_encoder_only:
# Decoder.
for i in range(snake_case__ ):
# Block i, layer 0 (Self Attention).
UpperCAmelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , '''decoder''' , '''pre_self_attention_layer_norm''' )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = tax_attention_lookup(snake_case__ , snake_case__ , '''decoder''' , '''self_attention''' )
UpperCAmelCase = layer_norm
UpperCAmelCase = k.T
UpperCAmelCase = o.T
UpperCAmelCase = q.T
UpperCAmelCase = v.T
# Block i, layer 1 (Cross Attention).
UpperCAmelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , '''decoder''' , '''pre_cross_attention_layer_norm''' )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = tax_attention_lookup(snake_case__ , snake_case__ , '''decoder''' , '''encoder_decoder_attention''' )
UpperCAmelCase = layer_norm
UpperCAmelCase = k.T
UpperCAmelCase = o.T
UpperCAmelCase = q.T
UpperCAmelCase = v.T
# Block i, layer 2 (MLP).
UpperCAmelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , '''decoder''' , '''pre_mlp_layer_norm''' )
UpperCAmelCase , UpperCAmelCase = tax_mlp_lookup(snake_case__ , snake_case__ , '''decoder''' , snake_case__ )
UpperCAmelCase = layer_norm
if split_mlp_wi:
UpperCAmelCase = wi[0].T
UpperCAmelCase = wi[1].T
else:
UpperCAmelCase = wi.T
UpperCAmelCase = wo.T
UpperCAmelCase = old['''decoder/decoder_norm/scale''']
UpperCAmelCase = old[
'''decoder/relpos_bias/rel_embedding'''
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
UpperCAmelCase = old['''decoder/logits_dense/kernel'''].T
return new
def snake_case_ (_a : Tuple , _a : Union[str, Any] ):
UpperCAmelCase = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
UpperCAmelCase = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
UpperCAmelCase = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
UpperCAmelCase = state_dict['''shared.weight''']
return state_dict
def snake_case_ (_a : Union[str, Any] , _a : Optional[Any] , _a : int , _a : Any ):
UpperCAmelCase = checkpoints.load_tax_checkpoint(snake_case__ )
UpperCAmelCase = convert_tax_to_pytorch(snake_case__ , num_layers=config.num_layers , is_encoder_only=snake_case__ )
UpperCAmelCase = make_state_dict(snake_case__ , snake_case__ )
model.load_state_dict(snake_case__ , strict=snake_case__ )
def snake_case_ (_a : Tuple , _a : List[Any] , _a : Dict , _a : Dict = False ):
UpperCAmelCase = TaConfig.from_json_file(snake_case__ )
print(F"Building PyTorch model from configuration: {config}" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
UpperCAmelCase = TaEncoderModel(snake_case__ )
else:
UpperCAmelCase = TaForConditionalGeneration(snake_case__ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(snake_case__ )
# Verify that we can load the checkpoint.
model.from_pretrained(snake_case__ )
print('''Done''' )
if __name__ == "__main__":
A =argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
A =parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
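# Illustrative note (not part of the conversion script): T5X/Flax stores dense
# kernels as (in_features, out_features) while torch.nn.Linear weights are
# (out_features, in_features), which is why the conversion above transposes
# with `.T` everywhere. A minimal sketch with hypothetical sizes:
import numpy as np
import torch

t5x_kernel = np.zeros((512, 2048), dtype=np.float32)  # (in, out)
linear = torch.nn.Linear(512, 2048, bias=False)
linear.weight.data = torch.from_numpy(t5x_kernel).T   # -> (out, in)
assert tuple(linear.weight.shape) == (2048, 512)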
| 34 |
import unittest
import numpy as np
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , ) -> np.ndarray:
lowerCAmelCase = np.shape(snake_case__ )
lowerCAmelCase = np.shape(snake_case__ )
lowerCAmelCase = np.shape(snake_case__ )
if shape_a[0] != shape_b[0]:
lowerCAmelCase = (
'''Expected the same number of rows for A and B. '''
f"Instead found A of size {shape_a} and B of size {shape_b}"
)
raise ValueError(snake_case__ )
if shape_b[1] != shape_c[1]:
lowerCAmelCase = (
'''Expected the same number of columns for B and C. '''
f"Instead found B of size {shape_b} and C of size {shape_c}"
)
raise ValueError(snake_case__ )
lowerCAmelCase = pseudo_inv
if a_inv is None:
try:
lowerCAmelCase = np.linalg.inv(snake_case__ )
except np.linalg.LinAlgError:
raise ValueError(
'''Input matrix A is not invertible. Cannot compute Schur complement.''' )
return mat_c - mat_b.T @ a_inv @ mat_b
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self ) ->None:
lowerCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowerCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] )
lowerCAmelCase = np.array([[2, 1], [6, 3]] )
lowerCAmelCase = schur_complement(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = np.block([[a, b], [b.T, c]] )
lowerCAmelCase = np.linalg.det(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = np.linalg.det(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = np.linalg.det(__SCREAMING_SNAKE_CASE )
self.assertAlmostEqual(__SCREAMING_SNAKE_CASE , det_a * det_s )
def SCREAMING_SNAKE_CASE_ ( self ) ->None:
lowerCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowerCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] )
lowerCAmelCase = np.array([[2, 1], [6, 3]] )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
schur_complement(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->None:
lowerCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowerCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] )
lowerCAmelCase = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
schur_complement(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
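# Standalone numeric illustration of the identity the tests above exercise:
# det([[A, B], [B^T, C]]) == det(A) * det(C - B^T A^{-1} B) for invertible A.
import numpy as np

a = np.array([[2.0, 0.0], [0.0, 3.0]])
b = np.array([[1.0], [1.0]])
c = np.array([[4.0]])
s = c - b.T @ np.linalg.inv(a) @ b            # the Schur complement of A
m = np.block([[a, b], [b.T, c]])
assert abs(np.linalg.det(m) - np.linalg.det(a) * np.linalg.det(s)) < 1e-9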
| 338 | 0 |
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: int = 1000 ) -> int:
_UpperCAmelCase : List[str] = 2**power
_UpperCAmelCase : Optional[int] = 0
while n:
_UpperCAmelCase , _UpperCAmelCase : Any = r + n % 10, n // 10
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
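# Worked example (illustrative; `solution` is the name used in the entry point
# above): 2**15 == 32768 and 3+2+7+6+8 == 26, so solution(15) should return 26.
# The default power of 1000 matches Project Euler problem 16.
assert solution(15) == 26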
| 189 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.17.0.dev0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__)
@dataclass
class a :
_lowercase = field(
default="tab_fact" , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
_lowercase = field(
default="tab_fact" , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} , )
_lowercase = field(
default=1_0_2_4 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
_lowercase = field(
default=UpperCAmelCase , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
_lowercase = field(
default=UpperCAmelCase , metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
_lowercase = field(
default=UpperCAmelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
_lowercase = field(
default=UpperCAmelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
_lowercase = field(
default=UpperCAmelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
} , )
_lowercase = field(
default=UpperCAmelCase , metadata={"help": "A csv or a json file containing the training data."} )
_lowercase = field(
default=UpperCAmelCase , metadata={"help": "A csv or a json file containing the validation data."} )
_lowercase = field(default=UpperCAmelCase , metadata={"help": "A csv or a json file containing the test data."} )
def _UpperCAmelCase ( self ):
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError("Need either a GLUE task, a training/validation file or a dataset name." )
else:
_UpperCAmelCase : int = self.train_file.split("." )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
_UpperCAmelCase : Optional[int] = self.validation_file.split("." )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class a :
_lowercase = field(
default=UpperCAmelCase , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
_lowercase = field(
default=UpperCAmelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
_lowercase = field(
default=UpperCAmelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
_lowercase = field(
default=UpperCAmelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
_lowercase = field(
default=UpperCAmelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
_lowercase = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
_lowercase = field(
default=UpperCAmelCase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
def __SCREAMING_SNAKE_CASE ( ) -> List[str]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCAmelCase : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
_UpperCAmelCase : Tuple = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase )
datasets.utils.logging.set_verbosity(lowerCAmelCase )
transformers.utils.logging.set_verbosity(lowerCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
_UpperCAmelCase : List[str] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase : Tuple = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_UpperCAmelCase : Tuple = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
_UpperCAmelCase : int = {"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
_UpperCAmelCase : Tuple = data_args.train_file.split("." )[-1]
_UpperCAmelCase : Optional[Any] = data_args.test_file.split("." )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
_UpperCAmelCase : Union[str, Any] = data_args.test_file
else:
raise ValueError("Need either a GLUE task or a test file for `do_predict`." )
for key in data_files.keys():
logger.info(F'load a local file for {key}: {data_files[key]}' )
if data_args.train_file.endswith(".csv" ):
# Loading a dataset from local csv files
_UpperCAmelCase : List[str] = load_dataset("csv" , data_files=lowerCAmelCase , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
_UpperCAmelCase : Dict = load_dataset("json" , data_files=lowerCAmelCase , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
_UpperCAmelCase : List[str] = raw_datasets["train"].features["label"].names
_UpperCAmelCase : Tuple = len(lowerCAmelCase )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
_UpperCAmelCase : Tuple = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowerCAmelCase , )
_UpperCAmelCase : Union[str, Any] = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
_UpperCAmelCase : Tuple = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
_UpperCAmelCase : List[Any] = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
_UpperCAmelCase : List[Any] = {"Refused": 0, "Entailed": 1}
_UpperCAmelCase : Optional[int] = {0: "Refused", 1: "Entailed"}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
_UpperCAmelCase : int = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(lowerCAmelCase: str ):
# Tokenize the texts
def _convert_table_text_to_pandas(lowerCAmelCase: List[Any] ):
_UpperCAmelCase : Any = [_table_row.split("#" ) for _table_row in _table_text.strip("\n" ).split("\n" )]
_UpperCAmelCase : List[str] = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
_UpperCAmelCase : Tuple = examples["statement"]
_UpperCAmelCase : List[Any] = list(map(_convert_table_text_to_pandas , examples["table_text"] ) )
_UpperCAmelCase : Dict = tokenizer(lowerCAmelCase , lowerCAmelCase , padding=lowerCAmelCase , max_length=lowerCAmelCase , truncation=lowerCAmelCase )
_UpperCAmelCase : List[str] = examples["label"]
return result
with training_args.main_process_first(desc="dataset map pre-processing" ):
_UpperCAmelCase : List[Any] = raw_datasets.map(
lowerCAmelCase , batched=lowerCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on dataset" , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
_UpperCAmelCase : Dict = raw_datasets["train"]
if data_args.max_train_samples is not None:
_UpperCAmelCase : List[Any] = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
_UpperCAmelCase : Union[str, Any] = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
_UpperCAmelCase : Any = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("--do_predict requires a test dataset" )
_UpperCAmelCase : Dict = raw_datasets["test"]
if data_args.max_predict_samples is not None:
_UpperCAmelCase : Any = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowerCAmelCase ) ) , 3 ):
logger.info(F'Sample {index} of the training set: {train_dataset[index]}.' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowerCAmelCase: EvalPrediction ):
_UpperCAmelCase : Optional[int] = p.predictions[0] if isinstance(p.predictions , lowerCAmelCase ) else p.predictions
_UpperCAmelCase : Optional[Any] = np.argmax(lowerCAmelCase , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
_UpperCAmelCase : str = default_data_collator
elif training_args.fpaa:
_UpperCAmelCase : int = DataCollatorWithPadding(lowerCAmelCase , pad_to_multiple_of=8 )
else:
_UpperCAmelCase : List[str] = None
# Initialize our Trainer
_UpperCAmelCase : List[Any] = Trainer(
model=lowerCAmelCase , args=lowerCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowerCAmelCase , tokenizer=lowerCAmelCase , data_collator=lowerCAmelCase , )
# Training
if training_args.do_train:
_UpperCAmelCase : List[Any] = None
if training_args.resume_from_checkpoint is not None:
_UpperCAmelCase : List[str] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_UpperCAmelCase : Dict = last_checkpoint
_UpperCAmelCase : str = trainer.train(resume_from_checkpoint=lowerCAmelCase )
_UpperCAmelCase : Tuple = train_result.metrics
_UpperCAmelCase : Optional[int] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase )
)
_UpperCAmelCase : Any = min(lowerCAmelCase , len(lowerCAmelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train" , lowerCAmelCase )
trainer.save_metrics("train" , lowerCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_UpperCAmelCase : Optional[int] = trainer.evaluate(eval_dataset=lowerCAmelCase )
_UpperCAmelCase : Optional[Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCAmelCase )
_UpperCAmelCase : Any = min(lowerCAmelCase , len(lowerCAmelCase ) )
trainer.log_metrics("eval" , lowerCAmelCase )
trainer.save_metrics("eval" , lowerCAmelCase )
if training_args.do_predict:
logger.info("*** Predict ***" )
# Removing the `label` column because it contains -1 and Trainer won't like that.
_UpperCAmelCase : int = predict_dataset.remove_columns("label" )
_UpperCAmelCase : Any = trainer.predict(lowerCAmelCase , metric_key_prefix="predict" ).predictions
_UpperCAmelCase : List[str] = np.argmax(lowerCAmelCase , axis=1 )
_UpperCAmelCase : int = os.path.join(training_args.output_dir , "predict_results_tabfact.txt" )
if trainer.is_world_process_zero():
with open(lowerCAmelCase , "w" ) as writer:
logger.info("***** Predict Results *****" )
writer.write("index\tprediction\n" )
for index, item in enumerate(lowerCAmelCase ):
_UpperCAmelCase : List[Any] = label_list[item]
writer.write(F'{index}\t{item}\n' )
_UpperCAmelCase : int = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase )
else:
trainer.create_model_card(**lowerCAmelCase )
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 189 | 1 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
'/attention/': '/0/SelfAttention/',
'/self_attention/': '/0/SelfAttention/',
'/encoder_decoder_attention/': '/1/EncDecAttention/',
'value': 'v',
'query': 'q',
'key': 'k',
'out': 'o',
'pre_self_attention_layer_norm': '0/layer_norm',
'pre_cross_attention_layer_norm': '1/layer_norm',
'pre_attention_layer_norm': '0/layer_norm', # previously 1, but seems wrong
'token_embedder': 'shared',
'encoder_norm': 'final_layer_norm',
'decoder_norm': 'final_layer_norm',
'relpos_bias/rel_embedding': 'block/0/layer/0/SelfAttention/relative_attention_bias/weight',
'router/router_weights/w/': 'router/classifier/',
'roer/roer_weights/w/': 'router/classifier/',
'logits_dense': 'lm_head',
}
def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                # per-expert key layout assumed from the HF SwitchTransformers naming
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)

    return s_dict
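# Example of the resulting mapping (illustrative): a T5X key such as
#   "encoder/layers_0/attention/key/kernel"
# first becomes "encoder/block/0/layer/attention/key/kernel" via the layers_(\d+)
# rewrite, then MOE_LAYER_NAME_MAPPING turns "/attention/" into "/0/SelfAttention/"
# and "key" into "k", yielding "encoder/block/0/layer/0/SelfAttention/k/kernel".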
GIN_TO_CONFIG_MAPPING = {
'NUM_ENCODER_LAYERS': 'num_layers',
'NUM_DECODER_LAYERS': 'num_decoder_layers',
'NUM_HEADS': 'num_heads',
'HEAD_DIM': 'd_kv',
'EMBED_DIM': 'd_model',
'MLP_DIM': 'd_ff',
'NUM_SELECTED_EXPERTS': 'num_selected_experts',
'NUM_ENCODER_SPARSE_LAYERS': 'num_sparse_encoder_layers',
'NUM_DECODER_SPARSE_LAYERS': 'num_sparse_decoder_layers',
'dense.MlpBlock.activations': 'feed_forward_proj',
}
def convert_gin_to_config(gin_file, num_experts):
    # Convert a google style config to the hugging face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
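# Illustrative gin input (assumed shape of the file): a line such as
#   NUM_HEADS = 12
# is captured by r"(.*) = ([0-9.]*)" and mapped through GIN_TO_CONFIG_MAPPING to
# the config kwarg num_heads=12; values containing "." are parsed as floats.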
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_name, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    # Initialise PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_name)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'
' model architecture. If not provided, a `gin_file` has to be provided.'
),
)
parser.add_argument(
'--gin_file',
default=None,
type=str,
required=False,
help='Path to the gin config file. If not provided, a `config_file` has to be passed ',
)
parser.add_argument(
'--config_name', default=None, type=str, required=False, help='Config name of SwitchTransformers model.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output pytorch model.'
)
parser.add_argument('--num_experts', default=8, type=int, required=False, help='Number of experts')
    args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
) | 341 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits(x, bits=BITS):
    """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1"""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits
def bits_to_decimal(x, bits=BITS):
    """expects bits from -1 to 1, outputs image tensor from 0 to 1"""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
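# Round-trip sanity check (illustrative, not part of the original pipeline): each
# uint8 channel value maps to 8 sign bits in {-1, 1} and back losslessly:
#   x = torch.rand(1, 3, 4, 4)      # image tensor in [0, 1)
#   b = decimal_to_bits(x)          # (1, 3 * BITS, 4, 4), entries in {-1.0, 1.0}
#   x2 = bits_to_decimal(b)         # back to (1, 3, 4, 4) in [0, 1]
#   assert torch.equal((x * 255).int(), (x2 * 255).round().int())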
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
):
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read the DDIM paper for an in-detail understanding.
    # Notation (<variable name> -> <name in paper>)
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
):
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()

        self.bit_scale = bit_scale
        # monkey-patch the scheduler's step with the matching bit-space variant;
        # bound via __get__ so `self` resolves to the scheduler instance
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        ).__get__(scheduler)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image) | 341 | 1 |
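# Usage sketch (illustrative; assumes a UNet trained in bit space with
# in_channels == 3 * BITS):
#   unet = UNet2DConditionModel(in_channels=24, out_channels=24)  # hypothetical config
#   pipe = BitDiffusion(unet, DDIMScheduler())
#   image = pipe(height=256, width=256, num_inference_steps=50).images[0]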
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily keeps working
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 201 |
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())
def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 201 | 1 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    """simple docstring"""
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('''Bangalore'''), 1):
print(F'''Job {i:>2} is {job[0]} at {job[1]}''')
| 274 |
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    """simple docstring"""
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
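# Note (added for clarity, not in the original script): the helpers below first
# reshape each per-head trax attention tensor into the 2-D layout that the
# corresponding PyTorch nn.Linear expects, then hand the result to set_param.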
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    """simple docstring"""
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    """simple docstring"""
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    """simple docstring"""
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weights
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    """simple docstring"""
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    """simple docstring"""
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
_lowerCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_lowerCamelCase : Dict = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 167 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    """simple docstring"""

    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig):
    """simple docstring"""

    def __init__(
        self,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        project_dim=512,
        pooler_fn="cls",
        learn_encoder=False,
        use_attention_mask=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    """simple docstring"""

    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.base_model = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )

        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
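# Usage sketch (illustrative): AltDiffusion-style pipelines consume the
# `projection_state` field as the text embedding, e.g.
#   model = RobertaSeriesModelWithTransformation.from_pretrained("some/checkpoint")  # hypothetical id
#   text_embeds = model(input_ids=input_ids, attention_mask=attention_mask).projection_state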
| 111 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 111 | 1 |
"""simple docstring"""
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """simple docstring"""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum
def resistor_series(resistors: list[float]) -> float:
    """simple docstring"""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
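# Worked example (illustrative): two 4 Ω resistors in parallel combine to
# 1 / (1/4 + 1/4) = 2.0 Ω, while the same pair in series sums to 8.0 Ω.
#   resistor_parallel([4.0, 4.0])  # -> 2.0
#   resistor_series([4.0, 4.0])    # -> 8.0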
if __name__ == "__main__":
import doctest
doctest.testmod()
| 64 |
import random
def _partition(data: list, pivot) -> tuple:
    """simple docstring"""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater
def quick_select(items: list, index: int):
    """simple docstring"""
    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
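# Usage sketch (illustrative): quick_select returns the element with the given
# 0-based rank in expected O(n) time.
#   quick_select([2, 4, 5, 7, 899, 54, 32], 5)  # -> 54, since sorted(...)[5] == 54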
| 302 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
'''simple docstring'''
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
@property
    def dummy_cond_unet_upscale(self):
torch.manual_seed(0 )
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64), layers_per_block=2, sample_size=32, in_channels=7, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=8, use_linear_projection=True, only_cross_attention=(True, True, False), num_class_embeds=100, )
return model
@property
    def dummy_vae(self):
torch.manual_seed(0 )
        model = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
    def dummy_text_encoder(self):
torch.manual_seed(0 )
        config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='gelu' , projection_dim=512 , )
        return CLIPTextModel(config)
    def test_stable_diffusion_upscale(self):
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.dummy_cond_unet_upscale
SCREAMING_SNAKE_CASE = DDPMScheduler()
SCREAMING_SNAKE_CASE = DDIMScheduler(prediction_type='v_prediction' )
SCREAMING_SNAKE_CASE = self.dummy_vae
SCREAMING_SNAKE_CASE = self.dummy_text_encoder
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        SCREAMING_SNAKE_CASE = Image.fromarray(np.uint8(lowerCAmelCase__ ) ).convert('RGB' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE = StableDiffusionUpscalePipeline(
unet=lowerCAmelCase__ , low_res_scheduler=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , max_noise_level=350 , )
SCREAMING_SNAKE_CASE = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE = sd_pipe(
[prompt] , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE = sd_pipe(
[prompt] , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , return_dict=lowerCAmelCase__ , )[0]
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
SCREAMING_SNAKE_CASE = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def test_stable_diffusion_upscale_batch(self):
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.dummy_cond_unet_upscale
SCREAMING_SNAKE_CASE = DDPMScheduler()
SCREAMING_SNAKE_CASE = DDIMScheduler(prediction_type='v_prediction' )
SCREAMING_SNAKE_CASE = self.dummy_vae
SCREAMING_SNAKE_CASE = self.dummy_text_encoder
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        SCREAMING_SNAKE_CASE = Image.fromarray(np.uint8(lowerCAmelCase__ ) ).convert('RGB' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE = StableDiffusionUpscalePipeline(
unet=lowerCAmelCase__ , low_res_scheduler=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , max_noise_level=350 , )
SCREAMING_SNAKE_CASE = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
SCREAMING_SNAKE_CASE = output.images
assert image.shape[0] == 2
SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE = sd_pipe(
[prompt] , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
SCREAMING_SNAKE_CASE = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
    def test_stable_diffusion_upscale_fp16(self):
SCREAMING_SNAKE_CASE = self.dummy_cond_unet_upscale
SCREAMING_SNAKE_CASE = DDPMScheduler()
SCREAMING_SNAKE_CASE = DDIMScheduler(prediction_type='v_prediction' )
SCREAMING_SNAKE_CASE = self.dummy_vae
SCREAMING_SNAKE_CASE = self.dummy_text_encoder
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        SCREAMING_SNAKE_CASE = Image.fromarray(np.uint8(lowerCAmelCase__ ) ).convert('RGB' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
SCREAMING_SNAKE_CASE = unet.half()
SCREAMING_SNAKE_CASE = text_encoder.half()
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE = StableDiffusionUpscalePipeline(
unet=lowerCAmelCase__ , low_res_scheduler=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , max_noise_level=350 , )
SCREAMING_SNAKE_CASE = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = sd_pipe(
[prompt] , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type='np' , ).images
SCREAMING_SNAKE_CASE = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
'''simple docstring'''
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_upscale_pipeline(self):
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
SCREAMING_SNAKE_CASE = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat.npy' )
SCREAMING_SNAKE_CASE = 'stabilityai/stable-diffusion-x4-upscaler'
SCREAMING_SNAKE_CASE = StableDiffusionUpscalePipeline.from_pretrained(lowerCAmelCase__ )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE = 'a cat sitting on a park bench'
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , output_type='np' , )
SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-3
    def test_stable_diffusion_upscale_pipeline_fp16(self):
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
SCREAMING_SNAKE_CASE = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat_fp16.npy' )
SCREAMING_SNAKE_CASE = 'stabilityai/stable-diffusion-x4-upscaler'
SCREAMING_SNAKE_CASE = StableDiffusionUpscalePipeline.from_pretrained(
            lowerCAmelCase__ , torch_dtype=torch.float16 , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE = 'a cat sitting on a park bench'
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , output_type='np' , )
SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
SCREAMING_SNAKE_CASE = 'stabilityai/stable-diffusion-x4-upscaler'
SCREAMING_SNAKE_CASE = StableDiffusionUpscalePipeline.from_pretrained(
            lowerCAmelCase__ , torch_dtype=torch.float16 , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE = 'a cat sitting on a park bench'
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=5 , output_type='np' , )
SCREAMING_SNAKE_CASE = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 357 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
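# Behavioural note (added for clarity): _LazyModule defers the heavy imports; a
# name listed in _import_structure, e.g. MvpForConditionalGeneration, is only
# resolved to its real submodule on first attribute access.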
| 38 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""CLIPTokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""CLIPFeatureExtractor"""]
_UpperCAmelCase = ["""CLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 140 |
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}


class BertAbsConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "bertabs"
    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
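# Usage sketch (illustrative):
#   config = BertAbsConfig(dec_layers=8)
#   assert config.dec_layers == 8 and config.enc_hidden_size == 512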
| 140 | 1 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=False, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, rotary_dim=self.rotary_dim, )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask, past_key_values=past_key_values, position_ids=position_ids, )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], attention_mask=attention_mask, past_key_values=outputs_cache.past_key_values, position_ids=position_ids, )

        outputs = model(input_ids)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))], axis=-1, )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask_cache, past_key_values=past_key_values, position_ids=position_ids, )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=attention_mask_cache, position_ids=position_ids, )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
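    # The two check_* helpers above decode one step with a pre-filled key/value
    # cache (init_cache + past_key_values) and assert the logits match a plain
    # full forward pass to within 1e-3 -- the standard way to validate Flax
    # autoregressive cache plumbing.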
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)
    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)
    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )
@tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)
@is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__SCREAMING_SNAKE_CASE = self._prepare_for_class(_A , _A )
__SCREAMING_SNAKE_CASE = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__SCREAMING_SNAKE_CASE = model_class.__name__[4:] # Skip the "Flax" at the beginning
__SCREAMING_SNAKE_CASE = getattr(_A , _A )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = pt_inputs['input_ids'].shape
__SCREAMING_SNAKE_CASE = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_A ):
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = pt_model_class(_A ).eval()
                __SCREAMING_SNAKE_CASE = model_class(_A , dtype=jnp.float32 )
__SCREAMING_SNAKE_CASE = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _A )
__SCREAMING_SNAKE_CASE = fx_state
with torch.no_grad():
__SCREAMING_SNAKE_CASE = pt_model(**_A ).to_tuple()
__SCREAMING_SNAKE_CASE = fx_model(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_A , _A ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_A )
__SCREAMING_SNAKE_CASE = model_class.from_pretrained(_A , from_pt=_A )
__SCREAMING_SNAKE_CASE = fx_model_loaded(**_A ).to_tuple()
self.assertEqual(
len(_A ) , len(_A ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(_A , _A ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__SCREAMING_SNAKE_CASE = self._prepare_for_class(_A , _A )
__SCREAMING_SNAKE_CASE = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__SCREAMING_SNAKE_CASE = model_class.__name__[4:] # Skip the "Flax" at the beginning
__SCREAMING_SNAKE_CASE = getattr(_A , _A )
__SCREAMING_SNAKE_CASE = pt_model_class(_A ).eval()
__SCREAMING_SNAKE_CASE = model_class(_A , dtype=jnp.floataa )
__SCREAMING_SNAKE_CASE = load_flax_weights_in_pytorch_model(_A , fx_model.params )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = pt_inputs['input_ids'].shape
__SCREAMING_SNAKE_CASE = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
                for batch_idx, start_index in enumerate(random_slice_idx ):
                    pt_inputs['attention_mask'][batch_idx, :start_index] = 0
                    pt_inputs['attention_mask'][batch_idx, start_index:] = 1
                    prepared_inputs_dict['attention_mask'][batch_idx, :start_index] = 0
                    prepared_inputs_dict['attention_mask'][batch_idx, start_index:] = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__SCREAMING_SNAKE_CASE = pt_model(**_A ).to_tuple()
__SCREAMING_SNAKE_CASE = fx_model(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_A , _A ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_A )
__SCREAMING_SNAKE_CASE = pt_model_class.from_pretrained(_A , from_flax=_A )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = pt_model_loaded(**_A ).to_tuple()
self.assertEqual(
len(_A ) , len(_A ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_A , _A ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def _A ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
__SCREAMING_SNAKE_CASE = model(np.ones((1, 1) ) )
self.assertIsNotNone(_A )
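The batch-generation test above hinges on left padding. Below is a minimal, framework-free sketch (an illustration with arbitrary token ids, not part of the test file) of why decoder-only models such as GPT-J pad prompts on the left: every row then ends on a real token, so generation can continue from the last position.

import numpy as np

def left_pad(sequences, pad_id):
    # Pack variable-length token-id lists into one right-aligned array plus its attention mask.
    max_len = max(len(seq) for seq in sequences)
    input_ids = np.full((len(sequences), max_len), pad_id, dtype=np.int64)
    attention_mask = np.zeros((len(sequences), max_len), dtype=np.int64)
    for row, seq in enumerate(sequences):
        input_ids[row, max_len - len(seq):] = seq      # tokens flush right
        attention_mask[row, max_len - len(seq):] = 1   # 1 = real token, 0 = padding
    return input_ids, attention_mask

# Arbitrary example ids; 50256 is used as the pad id purely for illustration.
ids, mask = left_pad([[15496, 428, 318], [10814]], pad_id=50256)
assert mask[:, -1].all()  # every row ends on a real token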
| 118 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( UpperCamelCase_ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = LayoutLMTokenizer
UpperCamelCase__ : Any = LayoutLMTokenizerFast
UpperCamelCase__ : Optional[int] = True
UpperCamelCase__ : int = True
def _A ( self ):
'''simple docstring'''
super().setUp()
__SCREAMING_SNAKE_CASE = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def _A ( self , **_A ):
'''simple docstring'''
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_A )
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 'UNwant\u00E9d,running'
__SCREAMING_SNAKE_CASE = 'unwanted, running'
return input_text, output_text
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file )
__SCREAMING_SNAKE_CASE = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_A , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [7, 4, 5, 10, 8, 9] )
def _A ( self ):
'''simple docstring'''
pass
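The vocab fixture above, with its '##'-prefixed entries, drives a WordPiece tokenizer. The sketch below is a simplified greedy longest-match version (an assumption: it omits the lower-casing, punctuation splitting, and max-input-chars guard that real BERT-style tokenizers apply) showing how 'unwanted' becomes ['un', '##want', '##ed'].

def wordpiece(word, vocab, unk='[UNK]'):
    # Greedy longest-match-first segmentation into vocabulary pieces.
    tokens, start = [], 0
    while start < len(word):
        end, cur = len(word), None
        while start < end:  # shrink the window until a vocab entry matches
            piece = word[start:end]
            if start > 0:
                piece = '##' + piece  # continuation pieces carry the '##' marker
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return [unk]  # no piece matched: the whole word maps to [UNK]
        tokens.append(cur)
        start = end
    return tokens

vocab = {'un', '##want', '##ed', 'runn', '##ing'}
assert wordpiece('unwanted', vocab) == ['un', '##want', '##ed']
assert wordpiece('running', vocab) == ['runn', '##ing']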
| 118 | 1 |
'''simple docstring'''
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase):
UpperCAmelCase__ : Tuple = StableDiffusionControlNetImgaImgPipeline
UpperCAmelCase__ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
UpperCAmelCase__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCAmelCase__ : int = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'control_image'})
UpperCAmelCase__ : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowercase_ ( self :List[str] ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
__A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
torch.manual_seed(0 )
__A = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
__A = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=_A , set_alpha_to_one=_A , )
torch.manual_seed(0 )
__A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
__A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
__A = CLIPTextModel(_A )
__A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__A = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def lowercase_ ( self :Optional[int] , _A :Optional[int] , _A :Union[str, Any]=0 ) -> int:
'''simple docstring'''
if str(_A ).startswith('mps' ):
__A = torch.manual_seed(_A )
else:
__A = torch.Generator(device=_A ).manual_seed(_A )
__A = 2
__A = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_A , device=torch.device(_A ) , )
__A = floats_tensor(control_image.shape , rng=random.Random(_A ) ).to(_A )
__A = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__A = Image.fromarray(np.uinta(_A ) ).convert('RGB' ).resize((64, 64) )
__A = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def lowercase_ ( self :str ) -> List[str]:
'''simple docstring'''
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowercase_ ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def lowercase_ ( self :str ) -> List[Any]:
'''simple docstring'''
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase):
UpperCAmelCase__ : str = StableDiffusionControlNetImgaImgPipeline
UpperCAmelCase__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
UpperCAmelCase__ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCAmelCase__ : List[Any] = frozenset([]) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def lowercase_ ( self :int ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
__A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
torch.manual_seed(0 )
        def init_weights(m ):
            if isinstance(m , torch.nn.Convad ):
                torch.nn.init.normal_(m.weight )
                m.bias.data.fill_(1.0 )
        controlneta = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlneta.controlnet_down_blocks.apply(init_weights )
        torch.manual_seed(0 )
        controlnetb = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnetb.controlnet_down_blocks.apply(init_weights )
torch.manual_seed(0 )
__A = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=_A , set_alpha_to_one=_A , )
torch.manual_seed(0 )
__A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
__A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
__A = CLIPTextModel(_A )
__A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        __A = MultiControlNetModel([controlneta, controlnetb] )
__A = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def lowercase_ ( self :Any , _A :Any , _A :Dict=0 ) -> Tuple:
'''simple docstring'''
if str(_A ).startswith('mps' ):
__A = torch.manual_seed(_A )
else:
__A = torch.Generator(device=_A ).manual_seed(_A )
__A = 2
__A = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_A , device=torch.device(_A ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_A , device=torch.device(_A ) , ),
]
__A = floats_tensor(control_image[0].shape , rng=random.Random(_A ) ).to(_A )
__A = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__A = Image.fromarray(np.uinta(_A ) ).convert('RGB' ).resize((64, 64) )
__A = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def lowercase_ ( self :Any ) -> Union[str, Any]:
'''simple docstring'''
__A = self.get_dummy_components()
__A = self.pipeline_class(**_A )
pipe.to(_A )
        scale = 10.0
        steps = 4
        inputs = self.get_dummy_inputs(_A )
        inputs['num_inference_steps'] = steps
        inputs['controlnet_conditioning_scale'] = scale
        output_a = pipe(**inputs )[0]
        inputs = self.get_dummy_inputs(_A )
        inputs['num_inference_steps'] = steps
        inputs['controlnet_conditioning_scale'] = scale
        output_b = pipe(**inputs , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
        inputs = self.get_dummy_inputs(_A )
        inputs['num_inference_steps'] = steps
        inputs['controlnet_conditioning_scale'] = scale
        output_c = pipe(**inputs , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
        inputs = self.get_dummy_inputs(_A )
        inputs['num_inference_steps'] = steps
        inputs['controlnet_conditioning_scale'] = scale
        output_d = pipe(**inputs , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_a - output_b ) ) > 1E-3
        assert np.sum(np.abs(output_a - output_c ) ) > 1E-3
        assert np.sum(np.abs(output_a - output_d ) ) > 1E-3
def lowercase_ ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowercase_ ( self :Union[str, Any] ) -> str:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def lowercase_ ( self :List[str] ) -> Any:
'''simple docstring'''
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def lowercase_ ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
__A = self.get_dummy_components()
__A = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(_A )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase):
def lowercase_ ( self :int ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self :str ) -> Optional[Any]:
'''simple docstring'''
__A = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny' )
__A = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , safety_checker=_A , controlnet=_A )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_A )
__A = torch.Generator(device='cpu' ).manual_seed(0 )
__A = 'evil space-punk bird'
__A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ).resize((512, 512) )
__A = load_image(
'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png' ).resize((512, 512) )
__A = pipe(
_A , _A , control_image=_A , generator=_A , output_type='np' , num_inference_steps=50 , strength=0.6 , )
__A = output.images[0]
assert image.shape == (512, 512, 3)
__A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy' )
assert np.abs(expected_image - image ).max() < 9E-2
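The control-guidance switch exercised above turns `control_guidance_start`/`control_guidance_end` into a per-step on/off schedule for each ControlNet. Below is a simplified sketch (an assumption: the real diffusers scheduling uses a slightly different boundary rule, and `controlnet_keep` is a hypothetical helper name):

def controlnet_keep(num_steps, starts, ends):
    # One scale mask per denoising step; entry j is 1.0 while controlnet j is active.
    keeps = []
    for i in range(num_steps):
        progress = i / num_steps
        keeps.append([float(s <= progress < e) for s, e in zip(starts, ends)])
    return keeps

# Two controlnets, each active over half of a 4-step run.
print(controlnet_keep(4, starts=[0.0, 0.5], ends=[0.5, 1.0]))
# -> [[1.0, 0.0], [1.0, 0.0], [0.0, 1.0], [0.0, 1.0]]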
| 161 |
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
a__ : Any = [
"word_embeddings_layernorm.weight",
"word_embeddings_layernorm.bias",
"input_layernorm.weight",
"input_layernorm.bias",
"post_attention_layernorm.weight",
"post_attention_layernorm.bias",
"self_attention.dense.bias",
"mlp.dense_4h_to_h.bias",
"ln_f.weight",
"ln_f.bias",
]
a__ : Dict = [
"mlp.dense_4h_to_h.weight",
"self_attention.dense.weight",
]
def snake_case ( UpperCAmelCase , UpperCAmelCase )-> List[str]:
"""simple docstring"""
__A = {
'word_embeddings.weight': 'word_embeddings.weight',
'word_embeddings.norm.weight': 'word_embeddings_layernorm.weight',
'word_embeddings.norm.bias': 'word_embeddings_layernorm.bias',
'weight': 'ln_f.weight',
'bias': 'ln_f.bias',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
__A = int(re.match(R'.*layer_(\d*).*' , UpperCAmelCase )[1] )
layer_number -= 3
return f'h.{layer_number}.' + key
def snake_case ( UpperCAmelCase )-> Any:
"""simple docstring"""
if dtype == torch.bool:
return 1 / 8
__A = re.search(R'[^\d](\d+)$' , str(UpperCAmelCase ) )
if bit_search is None:
raise ValueError(f'`dtype` is not a valid dtype: {dtype}.' )
__A = int(bit_search.groups()[0] )
return bit_size // 8
def snake_case ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )-> str:
"""simple docstring"""
# Construct model
if bloom_config_file == "":
__A = BloomConfig()
else:
__A = BloomConfig.from_json_file(UpperCAmelCase )
if shard_model:
__A = os.listdir(UpperCAmelCase )
        __A = sorted(filter(lambda s : s.startswith('layer' ) and "model_00" in s , file_names ) )
__A = {'weight_map': {}, 'metadata': {}}
__A = 0
__A = None
__A = BloomConfig()
for j, file in enumerate(UpperCAmelCase ):
print('Processing file: {}'.format(UpperCAmelCase ) )
__A = None
for i in range(UpperCAmelCase ):
# load all TP files
__A = file.replace('model_00' , f'model_0{i}' )
__A = torch.load(os.path.join(UpperCAmelCase , UpperCAmelCase ) , map_location='cpu' )
# Rename keys in the transformers names
__A = list(temp.keys() )
for key in keys:
__A = temp.pop(UpperCAmelCase )
if tensors is None:
__A = temp
else:
for key in tensors.keys():
                        if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]] , dim=cat_dim )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
                if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                    tensors[key] = tensors[key] / pretraining_tp
torch.save(
UpperCAmelCase , os.path.join(
UpperCAmelCase , 'pytorch_model_{}-of-{}.bin'.format(str(j + 1 ).zfill(5 ) , str(len(UpperCAmelCase ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
__A = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
__A = 'pytorch_model_{}-of-{}.bin'.format(
str(j + 1 ).zfill(5 ) , str(len(UpperCAmelCase ) ).zfill(5 ) )
__A = BloomConfig()
__A = pytorch_dump_folder_path + '/' + CONFIG_NAME
__A = total_size
with open(UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(UpperCAmelCase , WEIGHTS_NAME + '.index.json' ) , 'w' , encoding='utf-8' ) as f:
__A = json.dumps(UpperCAmelCase , indent=2 , sort_keys=UpperCAmelCase ) + '\n'
f.write(UpperCAmelCase )
else:
__A = BloomModel(UpperCAmelCase )
__A = os.listdir(UpperCAmelCase )
        __A = sorted(filter(lambda s : s.startswith('layer' ) and "model_00" in s , file_names ) )
__A = None
for i, file in enumerate(UpperCAmelCase ):
__A = None
for i in range(UpperCAmelCase ):
# load all TP files
__A = file.replace('model_00' , f'model_0{i}' )
__A = torch.load(os.path.join(UpperCAmelCase , UpperCAmelCase ) , map_location='cpu' )
# Rename keys in the transformers names
__A = list(temp.keys() )
for key in keys:
__A = temp.pop(UpperCAmelCase )
if tensors is None:
__A = temp
else:
for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]] , dim=cat_dim )
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                    tensors[key] = tensors[key] / pretraining_tp
__A = model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase )
assert not other_keys.unexpected_keys, f'The keys {other_keys.unexpected_keys} are unexpected'
if missing_keys is None:
__A = set(other_keys.missing_keys )
else:
__A = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f'The keys {missing_keys} are missing'
# Save pytorch-model
os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase )
__A = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
__A = pytorch_dump_folder_path + '/' + CONFIG_NAME
print(f'Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}' )
if config.torch_dtype is not None:
__A = model.to(config.torch_dtype )
torch.save(model.state_dict() , UpperCAmelCase )
print(f'Save configuration file to {pytorch_config_dump_path}' )
with open(UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
a__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bloom_checkpoint_path",
default=None,
type=str,
required=True,
help="Path to the Megatron-LM checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--bloom_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--shard_model",
action="store_true",
help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
)
parser.add_argument(
"--pretraining_tp",
default=4,
type=int,
help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
)
a__ : Tuple = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
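The conversion above merges tensor-parallel (TP) shards with two rules: replicated parameters (the WEIGHTS_TO_AVERAGE_ENDSWITH group) are averaged across ranks, while split matmul weights are concatenated along their parallel dimension. A toy sketch with made-up shapes (an illustration only, not part of the script):

import torch

tp = 4
# Replicated parameter (e.g. a layer norm): each rank holds a near-identical copy -> average.
ln_shards = [torch.ones(8) * (1.0 + 0.01 * r) for r in range(tp)]
ln_merged = torch.stack(ln_shards).sum(0) / tp

# Row-parallel weight (e.g. mlp.dense_4h_to_h.weight): split along dim 1 -> concatenate on dim 1.
row_shards = [torch.randn(8, 32 // tp) for _ in range(tp)]
row_merged = torch.cat(row_shards, dim=1)

# Column-parallel weight: split along dim 0 -> concatenate on dim 0.
col_shards = [torch.randn(32 // tp, 8) for _ in range(tp)]
col_merged = torch.cat(col_shards, dim=0)

print(ln_merged.shape, row_merged.shape, col_merged.shape)  # torch.Size([8]) torch.Size([8, 32]) torch.Size([32, 8])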
| 161 | 1 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
_lowerCamelCase : int = logging.getLogger(__name__)
@dataclass(frozen=UpperCAmelCase__ )
class UpperCamelCase_ :
'''simple docstring'''
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
@dataclass(frozen=UpperCAmelCase__ )
class UpperCamelCase_ :
'''simple docstring'''
UpperCAmelCase__ = 42
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = 42
def __init__( self : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : PreTrainedTokenizer , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : List[str]=False , UpperCAmelCase__ : bool = False , ) ->Union[str, Any]:
'''simple docstring'''
A__ = hans_processors[task]()
A__ = os.path.join(
UpperCAmelCase__ , '''cached_{}_{}_{}_{}'''.format(
'''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(UpperCAmelCase__) , UpperCAmelCase__ , ) , )
A__ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
A__ , A__ = label_list[2], label_list[1]
A__ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
A__ = cached_features_file + '''.lock'''
with FileLock(UpperCAmelCase__):
if os.path.exists(UpperCAmelCase__) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""")
A__ = torch.load(UpperCAmelCase__)
else:
logger.info(f"""Creating features from dataset file at {data_dir}""")
A__ = (
processor.get_dev_examples(UpperCAmelCase__) if evaluate else processor.get_train_examples(UpperCAmelCase__)
)
logger.info('''Training examples: %s''' , len(UpperCAmelCase__))
A__ = hans_convert_examples_to_features(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
logger.info('''Saving features into cached file %s''' , UpperCAmelCase__)
torch.save(self.features , UpperCAmelCase__)
def __len__( self : List[str]) ->int:
'''simple docstring'''
return len(self.features)
def __getitem__( self : Any , UpperCAmelCase__ : str) ->InputFeatures:
'''simple docstring'''
return self.features[i]
def SCREAMING_SNAKE_CASE ( self : Any) ->Dict:
'''simple docstring'''
return self.label_list
if is_tf_available():
import tensorflow as tf
class UpperCamelCase_ :
'''simple docstring'''
UpperCAmelCase__ = 42
def __init__( self : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : PreTrainedTokenizer , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int] = 128 , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : bool = False , ) ->Optional[Any]:
'''simple docstring'''
A__ = hans_processors[task]()
A__ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
A__ , A__ = label_list[2], label_list[1]
A__ = label_list
A__ = processor.get_dev_examples(UpperCAmelCase__) if evaluate else processor.get_train_examples(UpperCAmelCase__)
A__ = hans_convert_examples_to_features(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features) , desc='''convert examples to features'''):
if ex_index % 10_000 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(UpperCAmelCase__)))
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
A__ = tf.data.Dataset.from_generator(
UpperCAmelCase__ , (
{
'''example_id''': tf.intaa,
'''input_ids''': tf.intaa,
'''attention_mask''': tf.intaa,
'''token_type_ids''': tf.intaa,
},
tf.intaa,
) , (
{
'''example_id''': tf.TensorShape([]),
'''input_ids''': tf.TensorShape([None, None]),
'''attention_mask''': tf.TensorShape([None, None]),
'''token_type_ids''': tf.TensorShape([None, None]),
},
tf.TensorShape([]),
) , )
def SCREAMING_SNAKE_CASE ( self : Any) ->Tuple:
'''simple docstring'''
return self.dataset
def __len__( self : Tuple) ->Any:
'''simple docstring'''
return len(self.features)
def __getitem__( self : List[str] , UpperCAmelCase__ : int) ->InputFeatures:
'''simple docstring'''
return self.features[i]
def SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]:
'''simple docstring'''
return self.label_list
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : Union[str, Any]) ->Optional[Any]:
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(UpperCAmelCase__ , '''heuristics_train_set.txt''')) , '''train''')
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any]) ->int:
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(UpperCAmelCase__ , '''heuristics_evaluation_set.txt''')) , '''dev''')
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[Any]:
'''simple docstring'''
return ["contradiction", "entailment", "neutral"]
def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple) ->List[str]:
'''simple docstring'''
A__ = []
for i, line in enumerate(UpperCAmelCase__):
if i == 0:
continue
A__ = '''%s-%s''' % (set_type, line[0])
A__ = line[5]
A__ = line[6]
A__ = line[7][2:] if line[7].startswith('''ex''') else line[7]
A__ = line[0]
examples.append(InputExample(guid=UpperCAmelCase__ , text_a=UpperCAmelCase__ , text_b=UpperCAmelCase__ , label=UpperCAmelCase__ , pairID=UpperCAmelCase__))
return examples
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[int]:
"""simple docstring"""
A__ = {label: i for i, label in enumerate(lowercase_ )}
A__ = []
for ex_index, example in tqdm.tqdm(enumerate(lowercase_ ) , desc='''convert examples to features''' ):
if ex_index % 10_000 == 0:
logger.info('''Writing example %d''' % (ex_index) )
A__ = tokenizer(
example.text_a , example.text_b , add_special_tokens=lowercase_ , max_length=lowercase_ , padding='''max_length''' , truncation=lowercase_ , return_overflowing_tokens=lowercase_ , )
A__ = label_map[example.label] if example.label in label_map else 0
A__ = int(example.pairID )
features.append(InputFeatures(**lowercase_ , label=lowercase_ , pairID=lowercase_ ) )
for i, example in enumerate(examples[:5] ):
logger.info('''*** Example ***''' )
logger.info(f"""guid: {example}""" )
logger.info(f"""features: {features[i]}""" )
return features
_lowerCamelCase : int = {
"""hans""": 3,
}
_lowerCamelCase : int = {
"""hans""": HansProcessor,
}
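The dataset class above wraps featurization in a FileLock so that only one distributed worker builds the cache while the rest block and then read it. A condensed sketch of that pattern (`load_or_build` is a hypothetical helper, not part of the file):

import os
import torch
from filelock import FileLock

def load_or_build(cache_path, build_fn):
    with FileLock(cache_path + '.lock'):       # serializes access across processes
        if os.path.exists(cache_path):
            return torch.load(cache_path)      # later workers hit the cache
        features = build_fn()                  # first worker does the expensive work
        torch.save(features, cache_path)
        return features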
| 371 |
def reverse_long_words(sentence: str ) -> str:
    """
    Reverse all words that are longer than 4 characters in a sentence.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1] ) if len(word ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("""Hey wollef sroirraw"""))
| 231 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCAmelCase__ : Any = logging.get_logger(__name__)
class a__ ( UpperCAmelCase ):
"""simple docstring"""
UpperCAmelCase__ : int =["""pixel_values"""]
def __init__( self : Tuple , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Dict[str, int]] = None , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[int, float] = 1 / 2_5_5 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , **UpperCAmelCase__ : List[str] , ) ->None:
"""simple docstring"""
super().__init__(**UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Tuple = size if size is not None else {"""shortest_edge""": 2_5_6}
SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : List[str] = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
SCREAMING_SNAKE_CASE : Optional[Any] = get_size_dict(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = do_resize
SCREAMING_SNAKE_CASE : List[str] = size
SCREAMING_SNAKE_CASE : str = resample
SCREAMING_SNAKE_CASE : Optional[Any] = do_center_crop
SCREAMING_SNAKE_CASE : List[str] = crop_size
SCREAMING_SNAKE_CASE : List[Any] = do_rescale
SCREAMING_SNAKE_CASE : Dict = rescale_factor
SCREAMING_SNAKE_CASE : Any = do_normalize
SCREAMING_SNAKE_CASE : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowercase ( self : Tuple , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : int , ) ->np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
if "shortest_edge" not in size:
raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
SCREAMING_SNAKE_CASE : Optional[Any] = get_resize_output_image_size(UpperCAmelCase__ , size=size["""shortest_edge"""] , default_to_square=UpperCAmelCase__ )
return resize(UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : int , ) ->np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(UpperCAmelCase__ )
return center_crop(UpperCAmelCase__ , size=(size["""height"""], size["""width"""]) , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : float , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : List[str] ) ->np.ndarray:
"""simple docstring"""
return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def _lowercase ( self : str , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[float, List[float]] , UpperCAmelCase__ : Union[float, List[float]] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Dict , ) ->np.ndarray:
"""simple docstring"""
return normalize(UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[float] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCAmelCase__ : List[str] , ) ->List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE : str = size if size is not None else self.size
SCREAMING_SNAKE_CASE : Optional[int] = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Tuple = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE : List[Any] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE : str = get_size_dict(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : int = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE : Tuple = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE : List[str] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE : Dict = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE : int = make_list_of_images(UpperCAmelCase__ )
if not valid_images(UpperCAmelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE : Optional[Any] = [to_numpy_array(UpperCAmelCase__ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE : Optional[int] = [self.resize(image=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE : List[str] = [self.center_crop(image=UpperCAmelCase__ , size=UpperCAmelCase__ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE : List[str] = [self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE : int = [self.normalize(image=UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ ) for image in images]
SCREAMING_SNAKE_CASE : Any = [to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]
SCREAMING_SNAKE_CASE : int = {"""pixel_values""": images}
return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
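The processor above resizes so the image's short side hits `shortest_edge`, then center-crops a fixed window. The geometry in isolation (a sketch; exact rounding may differ from the library helper):

def shortest_edge_size(height, width, shortest_edge):
    # Scale both sides by the factor that brings the short side to `shortest_edge`.
    scale = shortest_edge / min(height, width)
    return round(height * scale), round(width * scale)

def center_crop_box(height, width, crop_h, crop_w):
    # Top-left and bottom-right corners of a crop centered in the image.
    top = (height - crop_h) // 2
    left = (width - crop_w) // 2
    return top, left, top + crop_h, left + crop_w

h, w = shortest_edge_size(480, 640, 256)        # -> (256, 341)
print((h, w), center_crop_box(h, w, 224, 224))  # crop box centered in the resized image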
| 245 |
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHAaHash :
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase__ : Optional[int] ) ->str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = data
SCREAMING_SNAKE_CASE : str = [0X67_45_23_01, 0XEF_CD_AB_89, 0X98_BA_DC_FE, 0X10_32_54_76, 0XC3_D2_E1_F0]
@staticmethod
def _lowercase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] ) ->Tuple:
"""simple docstring"""
return ((n << b) | (n >> (3_2 - b))) & 0XFF_FF_FF_FF
def _lowercase ( self : List[Any] ) ->int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = B"""\x80""" + B"""\x00""" * (6_3 - (len(self.data ) + 8) % 6_4)
SCREAMING_SNAKE_CASE : List[str] = self.data + padding + struct.pack(""">Q""" , 8 * len(self.data ) )
return padded_data
def _lowercase ( self : Dict ) ->List[Any]:
"""simple docstring"""
return [
self.padded_data[i : i + 6_4] for i in range(0 , len(self.padded_data ) , 6_4 )
]
def _lowercase ( self : int , UpperCAmelCase__ : Any ) ->Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = list(struct.unpack(""">16L""" , UpperCAmelCase__ ) ) + [0] * 6_4
for i in range(1_6 , 8_0 ):
SCREAMING_SNAKE_CASE : Optional[Any] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 1_4] ^ w[i - 1_6]) , 1 )
return w
def _lowercase ( self : Any ) ->List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.padding()
SCREAMING_SNAKE_CASE : Any = self.split_blocks()
for block in self.blocks:
SCREAMING_SNAKE_CASE : str = self.expand_block(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.h
for i in range(0 , 8_0 ):
if 0 <= i < 2_0:
SCREAMING_SNAKE_CASE : List[str] = (b & c) | ((~b) & d)
SCREAMING_SNAKE_CASE : str = 0X5A_82_79_99
elif 2_0 <= i < 4_0:
SCREAMING_SNAKE_CASE : List[Any] = b ^ c ^ d
SCREAMING_SNAKE_CASE : Any = 0X6E_D9_EB_A1
elif 4_0 <= i < 6_0:
SCREAMING_SNAKE_CASE : Union[str, Any] = (b & c) | (b & d) | (c & d)
SCREAMING_SNAKE_CASE : List[str] = 0X8F_1B_BC_DC
elif 6_0 <= i < 8_0:
SCREAMING_SNAKE_CASE : Dict = b ^ c ^ d
SCREAMING_SNAKE_CASE : int = 0XCA_62_C1_D6
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = (
self.rotate(UpperCAmelCase__ , 5 ) + f + e + k + expanded_block[i] & 0XFF_FF_FF_FF,
a,
self.rotate(UpperCAmelCase__ , 3_0 ),
c,
d,
)
SCREAMING_SNAKE_CASE : Union[str, Any] = (
self.h[0] + a & 0XFF_FF_FF_FF,
self.h[1] + b & 0XFF_FF_FF_FF,
self.h[2] + c & 0XFF_FF_FF_FF,
self.h[3] + d & 0XFF_FF_FF_FF,
self.h[4] + e & 0XFF_FF_FF_FF,
)
return ("{:08x}" * 5).format(*self.h )
def test_sha1_hash( ) -> None:
    msg = b"""Test String"""
    assert SHAaHash(msg ).final_hash() == hashlib.sha1(msg ).hexdigest() # noqa: S324
def main( ) -> None:
SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser(description="""Process some strings or files""" )
parser.add_argument(
"""--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
parser.add_argument("""--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , """rb""" ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string , """utf-8""" )
    print(SHAaHash(hash_input ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
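The padding step above follows the usual Merkle-Damgard rule: append 0x80, zero-fill until the length is 56 mod 64 bytes, then append the original bit length as a big-endian 64-bit integer. A standalone check of the arithmetic (duplicated outside the class purely for verification):

import struct

def sha1_pad(data: bytes) -> bytes:
    padding = b'\x80' + b'\x00' * (63 - (len(data) + 8) % 64)
    return data + padding + struct.pack('>Q', 8 * len(data))

for n in (0, 1, 55, 56, 64, 119):
    assert len(sha1_pad(b'a' * n)) % 64 == 0  # padded size is always a whole number of 64-byte blocks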
| 245 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase : Tuple = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[int] = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
lowercase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 370 |
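`_LazyModule` above defers the heavy submodule imports until a symbol is actually touched. A stripped-down sketch of the same idea using module-level `__getattr__` (PEP 562); this is a generic restatement, not the transformers implementation:

import importlib

_IMPORT_STRUCTURE = {'json': ['dumps', 'loads']}  # submodule -> names it exports
_ATTR_TO_MODULE = {name: mod for mod, names in _IMPORT_STRUCTURE.items() for name in names}

def __getattr__(name):  # module-level hook: runs only on failed lookups from outside the module
    if name in _ATTR_TO_MODULE:
        module = importlib.import_module(_ATTR_TO_MODULE[name])
        return getattr(module, name)  # the real import happens here, on first use
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')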
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator , batch_size: int = 16):
    """Build train/eval `DataLoader`s for GLUE MRPC, tokenized with bert-base-cased."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue" , "mrpc")
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label" , "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="longest" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="pt" , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders # noqa: F811
def training_function(config , args ):
    '''simple docstring'''
    if os.environ.get("TESTING_MOCKED_DATALOADERS" , None) == "1":
        config["num_epochs"] = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir)
    else:
        accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size)
    metric = evaluate.load("glue" , "mrpc")
# If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler)
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run , config)
# Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'epoch {epoch}:' , eval_metric)
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                } , step=epoch , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def main() -> None:
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision" , type=str , default=None , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
        " between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        " and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU.")
parser.add_argument(
"--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
    parser.add_argument(
        "--project_dir" , type=str , default="logs" , help="Location on where to store experiment tracking logs and relevant project information" , )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config , args)
if __name__ == "__main__":
    main()
| 151 | 0 |
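The script above divides the loss by the number of gradient accumulation steps so that several micro-batches reproduce one large-batch update. The arithmetic in isolation (plain numbers, no model):

requested_batch = 64
max_gpu_batch = 16
grad_accum_steps = requested_batch // max_gpu_batch  # 4 micro-batches per optimizer step

micro_losses = [0.9, 1.1, 1.0, 1.2]
accumulated = sum(loss / grad_accum_steps for loss in micro_losses)
assert abs(accumulated - sum(micro_losses) / grad_accum_steps) < 1e-9  # same mean loss as one big batch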
from ..utils import DummyObject, requires_backends
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : Optional[int] , *_lowerCAmelCase : str , **_lowerCAmelCase : List[Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : List[Any] , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : str ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Tuple , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : Any ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : Tuple , *_lowerCAmelCase : int , **_lowerCAmelCase : Union[str, Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : str , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Optional[int] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : int , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : List[str] ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : Optional[int] , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : int ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Union[str, Any] , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : str ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Optional[Any] , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : Any ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : List[str] , *_lowerCAmelCase : Any , **_lowerCAmelCase : Any ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : List[Any] , *_lowerCAmelCase : Any , **_lowerCAmelCase : Optional[int] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : List[Any] , *_lowerCAmelCase : Any , **_lowerCAmelCase : Tuple ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : Any , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : List[Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Optional[int] , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : Any ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Dict , *_lowerCAmelCase : str , **_lowerCAmelCase : Optional[int] ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : Any , *_lowerCAmelCase : Dict , **_lowerCAmelCase : str ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : int , *_lowerCAmelCase : str , **_lowerCAmelCase : Tuple ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Tuple , *_lowerCAmelCase : Any , **_lowerCAmelCase : str ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : Optional[Any] , *_lowerCAmelCase : str , **_lowerCAmelCase : List[str] ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Tuple , *_lowerCAmelCase : int , **_lowerCAmelCase : str ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Any , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : Optional[int] ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : int , *_lowerCAmelCase : int , **_lowerCAmelCase : List[Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Optional[int] , *_lowerCAmelCase : Any , **_lowerCAmelCase : Union[str, Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Tuple , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : Optional[int] ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : Dict , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : Union[str, Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : str , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : Union[str, Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Tuple , *_lowerCAmelCase : int , **_lowerCAmelCase : Dict ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : List[str] , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : List[str] ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Optional[Any] , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : Dict ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Union[str, Any] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : Optional[Any] ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : Union[str, Any] , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : List[str] ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : List[Any] , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : List[str] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Optional[int] , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : Dict ):
requires_backends(cls , ['torch'] )
def UpperCAmelCase_ ( *__UpperCAmelCase : str , **__UpperCAmelCase : List[Any] ) -> str:
requires_backends(__UpperCAmelCase , ['torch'] )
def UpperCAmelCase_ ( *__UpperCAmelCase : Any , **__UpperCAmelCase : Union[str, Any] ) -> Tuple:
requires_backends(__UpperCAmelCase , ['torch'] )
def UpperCAmelCase_ ( *__UpperCAmelCase : Any , **__UpperCAmelCase : Any ) -> List[str]:
requires_backends(__UpperCAmelCase , ['torch'] )
def UpperCAmelCase_ ( *__UpperCAmelCase : Optional[Any] , **__UpperCAmelCase : Tuple ) -> Optional[Any]:
requires_backends(__UpperCAmelCase , ['torch'] )
def UpperCAmelCase_ ( *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Dict ) -> Any:
requires_backends(__UpperCAmelCase , ['torch'] )
def UpperCAmelCase_ ( *__UpperCAmelCase : List[Any] , **__UpperCAmelCase : List[str] ) -> Any:
requires_backends(__UpperCAmelCase , ['torch'] )
def UpperCAmelCase_ ( *__UpperCAmelCase : List[str] , **__UpperCAmelCase : Optional[int] ) -> Tuple:
requires_backends(__UpperCAmelCase , ['torch'] )
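# Illustrative sketch (added): every placeholder above follows the same
# "dummy object" pattern used when the torch backend is missing -- each
# entry point just calls `requires_backends`, which raises an error naming
# the missing backend. A de-obfuscated placeholder would look roughly like
# the following (the names `DummyModel` and `DummyObject` are assumptions,
# since the identifiers in this dump are mangled):
#
#   class DummyModel(metaclass=DummyObject):
#       _backends = ["torch"]
#
#       def __init__(self, *args, **kwargs):
#           requires_backends(self, ["torch"])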
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : List[str] , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : Any ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : List[Any] , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : List[Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : List[str] , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : Dict ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : List[str] , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : List[Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : str , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : Tuple ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Optional[Any] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : Union[str, Any] ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : List[str] , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : str ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : str , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : Union[str, Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Dict , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Dict ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : List[str] , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : Union[str, Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Optional[Any] , *_lowerCAmelCase : int , **_lowerCAmelCase : int ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : str , *_lowerCAmelCase : Dict , **_lowerCAmelCase : str ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : Optional[int] , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : Optional[int] ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Any , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : Union[str, Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Dict , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Optional[int] ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : Any , *_lowerCAmelCase : str , **_lowerCAmelCase : Any ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Optional[int] , *_lowerCAmelCase : str , **_lowerCAmelCase : Any ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : List[Any] , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : Dict ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : Any , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : Optional[Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Union[str, Any] , *_lowerCAmelCase : str , **_lowerCAmelCase : List[Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : str , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : List[str] ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : List[str] , *_lowerCAmelCase : Any , **_lowerCAmelCase : Tuple ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Tuple , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : List[str] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Optional[Any] , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : Any ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : Union[str, Any] , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : Any ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Any , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : Tuple ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Dict , *_lowerCAmelCase : int , **_lowerCAmelCase : Tuple ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : List[Any] , *_lowerCAmelCase : str , **_lowerCAmelCase : Union[str, Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : List[str] , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Optional[int] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Tuple , *_lowerCAmelCase : int , **_lowerCAmelCase : int ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : List[Any] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : Optional[int] ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Any , *_lowerCAmelCase : str , **_lowerCAmelCase : Union[str, Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Optional[int] , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : Dict ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : Optional[Any] , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : Optional[int] ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Optional[int] , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Tuple ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Dict , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : Optional[Any] ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : Optional[int] , *_lowerCAmelCase : str , **_lowerCAmelCase : Dict ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : List[Any] , *_lowerCAmelCase : Any , **_lowerCAmelCase : Tuple ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Dict , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : Union[str, Any] ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : int , *_lowerCAmelCase : int , **_lowerCAmelCase : Any ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Any , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : Optional[int] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Union[str, Any] , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : Union[str, Any] ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : str , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : str ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Dict , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : Tuple ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Any , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Optional[Any] ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : List[str] , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Optional[int] ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : int , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Optional[Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : str , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : Any ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : Optional[Any] , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : Tuple ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : List[Any] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : str ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Optional[Any] , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : int ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : Union[str, Any] , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : Optional[Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : int , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : Optional[Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Optional[int] , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : List[str] ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : Optional[int] , *_lowerCAmelCase : int , **_lowerCAmelCase : List[Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : List[Any] , *_lowerCAmelCase : Any , **_lowerCAmelCase : Optional[Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : str , *_lowerCAmelCase : Dict , **_lowerCAmelCase : List[Any] ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : str , *_lowerCAmelCase : str , **_lowerCAmelCase : List[str] ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Tuple , *_lowerCAmelCase : Dict , **_lowerCAmelCase : int ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : int , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : Optional[Any] ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : Optional[int] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : List[str] ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Union[str, Any] , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : Optional[int] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Any , *_lowerCAmelCase : str , **_lowerCAmelCase : Union[str, Any] ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : int , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : List[Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Optional[int] , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : List[str] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Dict , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : Tuple ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : Tuple , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : List[Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Dict , *_lowerCAmelCase : Any , **_lowerCAmelCase : Optional[Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : List[str] , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : Tuple ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : Tuple , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : List[Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Optional[Any] , *_lowerCAmelCase : int , **_lowerCAmelCase : Dict ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Optional[Any] , *_lowerCAmelCase : Any , **_lowerCAmelCase : Optional[Any] ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : List[Any] , *_lowerCAmelCase : int , **_lowerCAmelCase : str ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : List[Any] , *_lowerCAmelCase : int , **_lowerCAmelCase : List[Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Optional[int] , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Tuple ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : List[Any] , *_lowerCAmelCase : int , **_lowerCAmelCase : List[Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : List[Any] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : List[Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Optional[int] , *_lowerCAmelCase : str , **_lowerCAmelCase : Union[str, Any] ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : str , *_lowerCAmelCase : int , **_lowerCAmelCase : str ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : str , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : List[str] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Optional[Any] , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : Optional[int] ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : int , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : List[str] ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Dict , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : Dict ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Union[str, Any] , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : int ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : Dict , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : List[Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : int , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : Tuple ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : List[str] , *_lowerCAmelCase : str , **_lowerCAmelCase : str ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : Tuple , *_lowerCAmelCase : Dict , **_lowerCAmelCase : Dict ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Union[str, Any] , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : Tuple ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Tuple , *_lowerCAmelCase : int , **_lowerCAmelCase : List[Any] ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : Tuple , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : Dict ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Tuple , *_lowerCAmelCase : str , **_lowerCAmelCase : Dict ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Optional[int] , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : Dict ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : Optional[int] , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : Union[str, Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Optional[Any] , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : Any ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Tuple , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : str ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : Dict , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : Tuple ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Any , *_lowerCAmelCase : Any , **_lowerCAmelCase : int ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Dict , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : Any ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : Optional[int] , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : List[str] ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Union[str, Any] , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : Optional[int] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : str , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : List[Any] ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : Tuple , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : Optional[Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Any , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : List[Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Dict , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : Any ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : Dict , *_lowerCAmelCase : str , **_lowerCAmelCase : List[Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Dict , *_lowerCAmelCase : int , **_lowerCAmelCase : List[Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Union[str, Any] , *_lowerCAmelCase : Any , **_lowerCAmelCase : Tuple ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : Optional[Any] , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Optional[Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : List[str] , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : Union[str, Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : int , *_lowerCAmelCase : Dict , **_lowerCAmelCase : int ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : Dict , *_lowerCAmelCase : Any , **_lowerCAmelCase : str ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : List[str] , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : Optional[Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : Dict , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : str ):
requires_backends(cls , ['torch'] )
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch"]
def __init__( self : Optional[int] , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : Union[str, Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : List[str] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : List[str] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowerCAmelCase_ ( cls : List[str] , *_lowerCAmelCase : Any , **_lowerCAmelCase : Union[str, Any] ):
requires_backends(cls , ['torch'] ) | 225 |
def UpperCAmelCase_ ( number : int , position : int ) -> int:
    # set the bit at `position` of `number`
    return number | (1 << position)
def UpperCAmelCase_ ( number : int , position : int ) -> int:
    # clear the bit at `position` of `number`
    return number & ~(1 << position)
def UpperCAmelCase_ ( number : int , position : int ) -> int:
    # flip the bit at `position` of `number`
    return number ^ (1 << position)
def UpperCAmelCase_ ( number : int , position : int ) -> bool:
    # report whether the bit at `position` is set
    return ((number >> position) & 1) == 1
def UpperCAmelCase_ ( number : int , position : int ) -> int:
    # return the bit at `position` as 0 or 1
    return int((number & (1 << position)) != 0 )
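# Worked example (added): setting bit 1 of 0b1001 (9) yields 0b1011 (11);
# clearing bit 3 of 0b1011 yields 0b0011 (3); flipping bit 0 of 0b0011
# yields 0b0010 (2). The five helpers share one mangled name, so only the
# last definition (the 0/1 getter) remains bound at import time; the checks
# below exercise that surviving definition:
assert UpperCAmelCase_(0b1001, 0) == 1
assert UpperCAmelCase_(0b1001, 1) == 0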
if __name__ == "__main__":
import doctest
doctest.testmod() | 225 | 1 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
SCREAMING_SNAKE_CASE : List[Any] = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap( checkpoint_path , enable_fusion=False ):
    model , model_cfg = create_model(
        'HTSAT-tiny' , 'roberta' , checkpoint_path , precision='fp32' , device='cuda:0' if torch.cuda.is_available() else 'cpu' , enable_fusion=enable_fusion , fusion_type='aff_2d' if enable_fusion else None , )
    return model, model_cfg
def rename_state_dict( state_dict ):
    model_state_dict = {}
    sequential_layers_pattern = R""".*sequential.(\d+).*"""
    text_projection_pattern = R""".*_projection.(\d+).*"""
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key )
        if re.match(sequential_layers_pattern , key ):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern , key ).group(1 )
            key = key.replace(F'''sequential.{sequential_layer}.''' , F'''layers.{int(sequential_layer )//3}.linear.''' )
        elif re.match(text_projection_pattern , key ):
            projection_layer = int(re.match(text_projection_pattern , key ).group(1 ) )
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projection_layer == 0 else 2
            key = key.replace(F'''_projection.{projection_layer}.''' , F'''_projection.linear{transformers_projection_layer}.''' )
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0 ) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace('qkv' , 'query' )] = query_layer
            model_state_dict[key.replace('qkv' , 'key' )] = key_layer
            model_state_dict[key.replace('qkv' , 'value' )] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
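# Shape sketch (added): the qkv split above slices a fused attention tensor
# row-wise into three equal blocks. For a weight of size (3*d, ...), rows
# [0:d] become the query weight, rows [d:2*d] the key weight, and rows
# [2*d:3*d] the value weight -- e.g. a (2304, 768) fused matrix splits into
# three (768, 768) matrices.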
def convert_clap_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path , enable_fusion=False ):
    clap_model , clap_model_cfg = init_clap(checkpoint_path , enable_fusion=enable_fusion )
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict )
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config )
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict , strict=False )
    model.save_pretrained(pytorch_dump_folder_path )
    transformers_config.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
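    # Example invocation (added; the script name and paths are placeholders):
    #   python convert_clap_checkpoint.py \
    #       --checkpoint_path ./clap_htsat_tiny.pt \
    #       --pytorch_dump_folder_path ./clap-hf \
    #       --enable_fusion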
| 370 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType( ExplicitEnum ):
    CHARACTER = """char"""
    BPE = """bpe"""
    WORDPIECE = """wp"""
SCREAMING_SNAKE_CASE : Optional[int] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor( ProcessorMixin ):
    attributes = ["""image_processor""", """char_tokenizer"""]
    image_processor_class = """ViTImageProcessor"""
    char_tokenizer_class = """MgpstrTokenizer"""
    def __init__( self, image_processor=None, tokenizer=None, **kwargs ):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained('gpt2')
        self.wp_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
        super().__init__(image_processor, tokenizer)
    def __call__( self, text=None, images=None, return_tensors=None, **kwargs ):
        """simple docstring"""
        if images is None and text is None:
            raise ValueError('You need to specify either an `images` or `text` input to process.')
        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs
    def batch_decode( self, sequences ):
        """simple docstring"""
        char_preds , bpe_preds , wp_preds = sequences
        batch_size = char_preds.size(0)
        char_strs , char_scores = self._decode_helper(char_preds, 'char')
        bpe_strs , bpe_scores = self._decode_helper(bpe_preds, 'bpe')
        wp_strs , wp_scores = self._decode_helper(wp_preds, 'wp')
        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])
        out = {}
        out['generated_text'] = final_strs
        out['scores'] = final_scores
        out['char_preds'] = char_strs
        out['bpe_preds'] = bpe_strs
        out['wp_preds'] = wp_strs
        return out
    def _decode_helper( self, pred_logits, format ):
        """simple docstring"""
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = '[s]'
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = '#'
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = '[SEP]'
        else:
            raise ValueError(F'''Format {format} is not supported.''')
        dec_strs , conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _ , preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob , _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]
        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)
        return dec_strs, conf_scores
    def char_decode( self, sequences ):
        """simple docstring"""
        decode_strs = [seq.replace(' ', '') for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs
    def bpe_decode( self, sequences ):
        """simple docstring"""
        return self.bpe_tokenizer.batch_decode(sequences)
    def wp_decode( self, sequences ):
        """simple docstring"""
        decode_strs = [seq.replace(' ', '') for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
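    # Selection sketch (added): batch_decode runs three decoders per image and
    # keeps the highest-confidence string. E.g. with per-branch confidences
    # char=0.92, bpe=0.85, wp=0.88, max_score_index is 0, so the character
    # branch's string is returned as `generated_text` for that sample.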
| 84 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig( BackboneConfigMixin , PretrainedConfig ):
"""simple docstring"""
    model_type = 'swin'
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
    def __init__( self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1E-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.stage_names = ['stem'] + [F'stage{idx}' for idx in range(1,len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features,out_indices=out_indices,stage_names=self.stage_names )
class SwinOnnxConfig( OnnxConfig ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse('1.11' )
@property
    def inputs( self )-> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
    def atol_for_validation( self )-> float:
'''simple docstring'''
return 1E-4
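# Worked example (added): with the defaults above (embed_dim=96,
# depths=[2, 2, 6, 2]), the final hidden size is
# int(96 * 2 ** (4 - 1)) = 768, and the backbone exposes the stage names
# ['stem', 'stage1', 'stage2', 'stage3', 'stage4'].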
| 7 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class ResNetConfig( BackboneConfigMixin , PretrainedConfig ):
"""simple docstring"""
    model_type = 'resnet'
    layer_types = ['basic', 'bottleneck']
    def __init__( self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type="bottleneck", hidden_act="relu", downsample_in_first_stage=False, out_features=None, out_indices=None, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(F'layer_type={layer_type} is not one of {",".join(self.layer_types )}' )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ['stem'] + [F'stage{idx}' for idx in range(1,len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features,out_indices=out_indices,stage_names=self.stage_names )
class ResNetOnnxConfig( OnnxConfig ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse('1.11' )
@property
    def inputs( self )-> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
    def atol_for_validation( self )-> float:
'''simple docstring'''
return 1E-3
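# Worked example (added): ResNetConfig(layer_type='invalid') raises
# ValueError('layer_type=invalid is not one of basic,bottleneck'), while the
# defaults describe a ResNet-50-style model: 4 stages of depths [3, 4, 6, 3]
# with hidden sizes [256, 512, 1024, 2048].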
| 7 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__a :List[Any] = logging.get_logger(__name__)
__a :int = {
'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = 'perceiver'
    def __init__( self , num_latents=256 , d_latents=1280 , d_model=768 , num_blocks=1 , num_self_attends_per_block=26 , num_self_attention_heads=8 , num_cross_attention_heads=8 , qk_channels=None , v_channels=None , cross_attention_shape_for_attention="kv" , self_attention_widening_factor=1 , cross_attention_widening_factor=1 , hidden_act="gelu" , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , layer_norm_eps=1E-12 , use_query_residual=True , vocab_size=262 , max_position_embeddings=2048 , image_size=56 , train_size=[368, 496] , num_frames=16 , audio_samples_per_frame=1920 , samples_per_patch=16 , output_shape=[1, 16, 224, 224] , **kwargs , ):
        super().__init__(**kwargs )
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig( OnnxConfig ):
"""simple docstring"""
@property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("inputs", dynamic_axis),
("attention_mask", dynamic_axis),
] )
@property
    def atol_for_validation( self ):
return 1E-4
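    # Worked example (added) for generate_dummy_inputs below: dynamic axes
    # (-1) are replaced by fixed sizes -- batch OnnxConfig.default_fixed_batch
    # (2) and sequence OnnxConfig.default_fixed_sequence (8) minus the
    # tokenizer's special tokens -- so with 2 special tokens each dummy text
    # sample is the string "a" repeated 6 times.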
    def generate_dummy_inputs( self , preprocessor , batch_size = -1 , seq_length = -1 , num_choices = -1 , is_pair = False , framework = None , num_channels = 3 , image_width = 40 , image_height = 40 , ):
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor , PreTrainedTokenizerBase ):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair )
            seq_length = compute_effective_axis_dimension(
                seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"] ) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input , return_tensors=framework ) )
            inputs["inputs"] = inputs.pop("input_ids" )
            return inputs
        elif isinstance(preprocessor , FeatureExtractionMixin ) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size , fixed_dimension=OnnxConfig.default_fixed_batch )
            dummy_input = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
            inputs = dict(preprocessor(images=dummy_input , return_tensors=framework ) )
            inputs["inputs"] = inputs.pop("pixel_values" )
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." ) | 329 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a :Dict = logging.get_logger(__name__)
__a :int = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
        'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = 'realm'
    def __init__( self , vocab_size=30522 , hidden_size=768 , retriever_proj_size=128 , num_hidden_layers=12 , num_attention_heads=12 , num_candidates=8 , intermediate_size=3072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , span_hidden_size=256 , max_span_width=10 , reader_layer_norm_eps=1E-3 , reader_beam_size=5 , reader_seq_len=320 , num_block_records=13353718 , searcher_beam_size=5000 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size | 329 | 1 |
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
lowerCAmelCase__ = version.parse(importlib_metadata.version('''nltk'''))
if NLTK_VERSION >= version.Version('''3.6.4'''):
from nltk import word_tokenize
lowerCAmelCase__ = '''\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
'''
lowerCAmelCase__ = '''\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
'''
lowerCAmelCase__ = '''
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
\'meteor\': meteor score.
Examples:
>>> meteor = datasets.load_metric(\'meteor\')
>>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
>>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results["meteor"], 4))
0.6944
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Meteor( datasets.Metric ):
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''] , reference_urls=[
'''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''',
'''https://en.wikipedia.org/wiki/METEOR''',
] , )
    def _download_and_prepare( self , dl_manager ):
"""simple docstring"""
import nltk
nltk.download('''wordnet''' )
if NLTK_VERSION >= version.Version('''3.6.5''' ):
nltk.download('''punkt''' )
if NLTK_VERSION >= version.Version('''3.6.6''' ):
nltk.download('''omw-1.4''' )
    def _compute( self , predictions , references , alpha=0.9 , beta=3 , gamma=0.5 ):
        """simple docstring"""
        if NLTK_VERSION >= version.Version('''3.6.5''' ):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref ) , word_tokenize(pred ) , alpha=alpha , beta=beta , gamma=gamma )
                for ref, pred in zip(references , predictions )
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref , pred , alpha=alpha , beta=beta , gamma=gamma )
                for ref, pred in zip(references , predictions )
            ]
        return {"meteor": np.mean(scores )}
| 72 |
"""simple docstring"""
def find_minimum_change( denominations : list[int], value : str ) -> list[int]:
    '''simple docstring'''
    total_value = int(value )
    # Initialize Result
    answer = []
    # Traverse through all denomination
    for denomination in reversed(denominations ):
        # Find denominations
        while int(total_value ) >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination )  # Append the "answers" array
    return answer
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = '''0'''
    if (
        input('''Do you want to enter your denominations ? (y/n): ''').strip().lower()
        == "y"
    ):
        n = int(input('''Enter the number of denominations you want to add: ''').strip())
        for i in range(0, n):
            denominations.append(int(input(F"""Denomination {i}: """).strip()))
        value = input('''Enter the change you want to make in Indian Currency: ''').strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input('''Enter the change you want to make: ''').strip()
    if int(value) == 0 or int(value) < 0:
        print('''The total value cannot be zero or negative.''')
    else:
        print(F"""Following is minimal change for {value}: """)
        answer = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
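    # Worked example (added): for value 987 with the default denominations, the
    # greedy pass picks [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]. Greedy is
    # optimal for canonical coin systems like this one, but not in general:
    # with denominations [1, 3, 4] and value 6 it returns [4, 1, 1] although
    # [3, 3] uses fewer coins.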
| 72 | 1 |
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester( unittest.TestCase , ToolTesterMixin ):
    def setUp( self ):
        '''simple docstring'''
        self.tool = load_tool('''text-to-speech''' )
        self.tool.setup()
    def test_exact_match_arg( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        result = self.tool('''hey''' )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485] ) , ) )
    def test_exact_match_kwarg( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        result = self.tool('''hey''' )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485] ) , ) )
| 299 |
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property( property ):
    def __get__( self , obj , objtype=None ):
        '''simple docstring'''
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError('''unreadable attribute''' )
        attr = '''__cached_''' + self.fget.__name__
        cached = getattr(obj , attr , None )
        if cached is None:
            cached = self.fget(obj )
            setattr(obj , attr , cached )
        return cached
def strtobool( val ):
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(F"""invalid truth value {val!r}""" )
def is_tensor( lowerCAmelCase__ ):
if is_torch_fx_proxy(lowerCAmelCase__ ):
return True
if is_torch_available():
import torch
if isinstance(lowerCAmelCase__ , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(lowerCAmelCase__ , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(lowerCAmelCase__ , (jnp.ndarray, Tracer) ):
return True
return isinstance(lowerCAmelCase__ , np.ndarray )
def _is_numpy( lowerCAmelCase__ ):
    return isinstance(lowerCAmelCase__ , np.ndarray )
def is_numpy_array( lowerCAmelCase__ ):
    return _is_numpy(lowerCAmelCase__ )
def _is_torch( lowerCAmelCase__ ):
    import torch
    return isinstance(lowerCAmelCase__ , torch.Tensor )
def is_torch_tensor( lowerCAmelCase__ ):
    return False if not is_torch_available() else _is_torch(lowerCAmelCase__ )
def _is_torch_device( lowerCAmelCase__ ):
    import torch
    return isinstance(lowerCAmelCase__ , torch.device )
def is_torch_device( lowerCAmelCase__ ):
    return False if not is_torch_available() else _is_torch_device(lowerCAmelCase__ )
def _is_torch_dtype( lowerCAmelCase__ ):
    import torch
    if isinstance(lowerCAmelCase__ , str ):
        if hasattr(torch , lowerCAmelCase__ ):
            lowerCAmelCase__ = getattr(torch , lowerCAmelCase__ )
        else:
            return False
    return isinstance(lowerCAmelCase__ , torch.dtype )
def is_torch_dtype( lowerCAmelCase__ ):
    return False if not is_torch_available() else _is_torch_dtype(lowerCAmelCase__ )
def _is_tensorflow( lowerCAmelCase__ ):
    import tensorflow as tf
    return isinstance(lowerCAmelCase__ , tf.Tensor )
def is_tf_tensor( lowerCAmelCase__ ):
    return False if not is_tf_available() else _is_tensorflow(lowerCAmelCase__ )
def _is_tf_symbolic_tensor( lowerCAmelCase__ ):
    import tensorflow as tf
    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf , '''is_symbolic_tensor''' ):
        return tf.is_symbolic_tensor(lowerCAmelCase__ )
    return type(lowerCAmelCase__ ) == tf.Tensor
def is_tf_symbolic_tensor( lowerCAmelCase__ ):
    return False if not is_tf_available() else _is_tf_symbolic_tensor(lowerCAmelCase__ )
def _is_jax( lowerCAmelCase__ ):
    import jax.numpy as jnp  # noqa: F811
    return isinstance(lowerCAmelCase__ , jnp.ndarray )
def is_jax_tensor( lowerCAmelCase__ ):
    return False if not is_flax_available() else _is_jax(lowerCAmelCase__ )
def to_py_obj( obj ):
    if isinstance(obj , (dict, UserDict) ):
        return {k: to_py_obj(v ) for k, v in obj.items()}
    elif isinstance(obj , (list, tuple) ):
        return [to_py_obj(o ) for o in obj]
    elif is_tf_tensor(obj ):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj ):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj ):
        return np.asarray(obj ).tolist()
    elif isinstance(obj , (np.ndarray, np.number) ):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj
def a__ ( lowerCAmelCase__ ) -> Tuple:
if isinstance(lowerCAmelCase__ , (dict, UserDict) ):
return {k: to_numpy(lowerCAmelCase__ ) for k, v in obj.items()}
elif isinstance(lowerCAmelCase__ , (list, tuple) ):
return np.array(lowerCAmelCase__ )
elif is_tf_tensor(lowerCAmelCase__ ):
return obj.numpy()
elif is_torch_tensor(lowerCAmelCase__ ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(lowerCAmelCase__ ):
return np.asarray(lowerCAmelCase__ )
else:
return obj
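# Quick sketch (added for illustration; assumes the availability helpers above and
# `UserDict`/`np` are importable) of how `to_py_obj`/`to_numpy` recurse through containers:
if __name__ == "__main__":
    nested = {"logits": np.array([[0.1, 0.9]] ), "ids": [np.int64(1 ), np.int64(2 )]}
    print(to_py_obj(nested ) )    # {'logits': [[0.1, 0.9]], 'ids': [1, 2]}
    print(to_numpy([1, 2, 3] ) )  # array([1, 2, 3])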
class lowerCamelCase_ ( __a ):
    def __post_init__( self : Tuple ):
        '''simple docstring'''
        class_fields = fields(self )
        # Safety and consistency checks
        if not len(class_fields ):
            raise ValueError(f"""{self.__class__.__name__} has no fields.""" )
        if not all(field.default is None for field in class_fields[1:] ):
            raise ValueError(f"""{self.__class__.__name__} should not have more than one required field.""" )
        first_field = getattr(self , class_fields[0].name )
        other_fields_are_none = all(getattr(self , field.name ) is None for field in class_fields[1:] )
        if other_fields_are_none and not is_tensor(first_field ):
            if isinstance(first_field , dict ):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field )
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator ):
                    if (
                        not isinstance(element , (list, tuple) )
                        or not len(element ) == 2
                        or not isinstance(element[0] , str )
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"""Cannot set key/value for {element}. It needs to be a tuple (key, value).""" )
                        break
                    setattr(self , element[0] , element[1] )
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self , field.name )
                if v is not None:
                    self[field.name] = v
    def __delitem__( self : Union[str, Any] , *args : Any , **kwargs : str ):
        '''simple docstring'''
        raise Exception(f"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" )
    def setdefault( self : Any , *args : List[str] , **kwargs : Tuple ):
        '''simple docstring'''
        raise Exception(f"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" )
    def pop( self : Optional[Any] , *args : Any , **kwargs : Tuple ):
        '''simple docstring'''
        raise Exception(f"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" )
    def update( self : Optional[Any] , *args : Dict , **kwargs : List[Any] ):
        '''simple docstring'''
        raise Exception(f"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" )
    def __getitem__( self : List[str] , k : Any ):
        '''simple docstring'''
        if isinstance(k , str ):
            inner_dict = dict(self.items() )
            return inner_dict[k]
        else:
            return self.to_tuple()[k]
    def __setattr__( self : int , name : Union[str, Any] , value : str ):
        '''simple docstring'''
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name , value )
        super().__setattr__(name , value )
    def __setitem__( self : Any , key : Optional[int] , value : List[str] ):
        '''simple docstring'''
        super().__setitem__(key , value )
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key , value )
    def to_tuple( self : Optional[Any] ):
        '''simple docstring'''
        return tuple(self[k] for k in self.keys() )
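# Illustration (added; kept as a comment because the base-class alias `__a` resolves
# outside this excerpt — it is the usual OrderedDict-backed ModelOutput pattern).
# Subclasses are @dataclass-decorated, e.g. a hypothetical
#     @dataclass
#     class MyOutput(lowerCamelCase_):
#         logits: np.ndarray = None
# after which attribute, key and index access all agree:
#     out = MyOutput(logits=np.zeros(2)); out.logits is out["logits"] is out[0]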
class lowerCamelCase_ ( str , Enum ):  # duplicated base fixed; assumes `from enum import Enum` among the module imports
    @classmethod
    def _missing_( cls : Optional[Any] , value : Optional[Any] ):
        '''simple docstring'''
        raise ValueError(
            f"""{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}""" )
class lowerCamelCase_ ( __a ):
    LONGEST = 'longest'
    MAX_LENGTH = 'max_length'
    DO_NOT_PAD = 'do_not_pad'
class lowerCamelCase_ ( __a ):
    PYTORCH = 'pt'
    TENSORFLOW = 'tf'
    NUMPY = 'np'
    JAX = 'jax'
class lowerCamelCase_ :
    def __init__( self : List[Any] , _A : List[ContextManager] ):
        '''simple docstring'''
        self.context_managers = _A
        self.stack = ExitStack()
    def __enter__( self : str ):
        '''simple docstring'''
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager )
def __exit__( self : Dict , *_A : List[Any] , **_A : str ):
'''simple docstring'''
self.stack.__exit__(*_A , **_A )
def can_return_loss( model_class ) -> Any:
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def find_labels( model_class ) -> Optional[int]:
    model_name = model_class.__name__
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict( d , parent_key = "" , delimiter = "." ) -> Any:
    def _flatten_dict(d , parent_key="" , delimiter="." ):
        for k, v in d.items():
            key = str(parent_key ) + delimiter + str(k ) if parent_key else k
            if v and isinstance(v , dict ):  # recurse into non-empty nested mappings
                yield from _flatten_dict(v , key , delimiter )
            else:
                yield key, v
    return dict(_flatten_dict(d , parent_key , delimiter ) )
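# Tiny self-check (added for illustration) of the flattening behaviour:
if __name__ == "__main__":
    assert flatten_dict({"a": {"b": 1, "c": {"d": 2}}} ) == {"a.b": 1, "a.c.d": 2}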
@contextmanager
def working_or_temp_dir( working_dir , use_temp_dir = False ) -> int:
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose( array , axes=None ) -> Optional[Any]:
    if is_numpy_array(array ):
        return np.transpose(array , axes=axes )
    elif is_torch_tensor(array ):
        return array.T if axes is None else array.permute(*axes )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.transpose(array , perm=axes )
    elif is_jax_tensor(array ):
        return jnp.transpose(array , axes=axes )
    else:
        raise ValueError(F"""Type not supported for transpose: {type(array )}.""" )
def reshape( array , newshape ) -> Tuple:
    if is_numpy_array(array ):
        return np.reshape(array , newshape )
    elif is_torch_tensor(array ):
        return array.reshape(*newshape )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.reshape(array , newshape )
    elif is_jax_tensor(array ):
        return jnp.reshape(array , newshape )
    else:
        raise ValueError(F"""Type not supported for reshape: {type(array )}.""" )
def squeeze( array , axis=None ) -> List[Any]:
    if is_numpy_array(array ):
        return np.squeeze(array , axis=axis )
    elif is_torch_tensor(array ):
        return array.squeeze() if axis is None else array.squeeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.squeeze(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.squeeze(array , axis=axis )
    else:
        raise ValueError(F"""Type not supported for squeeze: {type(array )}.""" )
def expand_dims( array , axis ) -> List[Any]:
    if is_numpy_array(array ):
        return np.expand_dims(array , axis )
    elif is_torch_tensor(array ):
        return array.unsqueeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.expand_dims(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.expand_dims(array , axis=axis )
    else:
        raise ValueError(F"""Type not supported for expand_dims: {type(array )}.""" )
def tensor_size( array ) -> int:
    if is_numpy_array(array ):
        return np.size(array )
    elif is_torch_tensor(array ):
        return array.numel()
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.size(array )
    elif is_jax_tensor(array ):
        return array.size
    else:
        raise ValueError(F"""Type not supported for tensor_size: {type(array )}.""" )
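# Short demo (added) of the framework-agnostic helpers on plain numpy inputs; the
# torch/tf/jax branches behave identically on their respective tensor types.
if __name__ == "__main__":
    m = np.arange(6 ).reshape(2 , 3 )
    assert transpose(m ).shape == (3, 2)
    assert reshape(m , (3, 2) ).shape == (3, 2)
    assert squeeze(np.ones((1, 4) ) ).shape == (4,)
    assert expand_dims(m , 0 ).shape == (1, 2, 3)
    assert tensor_size(m ) == 6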
def add_model_info_to_auto_map( auto_map , repo_id ) -> List[str]:
    for key, value in auto_map.items():
        if isinstance(value , (tuple, list) ):
            auto_map[key] = [F"""{repo_id}--{v}""" if (v is not None and '''--''' not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = F"""{repo_id}--{value}"""
    return auto_map
def infer_framework( model_class ) -> Tuple:
    for base_class in inspect.getmro(model_class ):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith('''torch''' ) or name == "PreTrainedModel":
            return "pt"
        elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel":
            return "flax"
    # only raise once the whole MRO has been inspected
    raise TypeError(F"""Could not infer framework from class {model_class}.""" )
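# Usage note (added): walking the MRO means any torch.nn.Module subclass maps to
# "pt"; e.g., assuming torch is installed:
#     import torch.nn as nn
#     class TinyNet(nn.Module): pass
#     infer_framework(TinyNet)  # -> "pt"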
| 299 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy( preds , labels ) -> List[Any]:
    return (preds == labels).mean()
@dataclass
class ModelArguments :
    model_name_or_path : str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    config_name : Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name : Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    cache_dir : Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class DataTrainingArguments :
    task_name : str = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} )
    data_dir : str = field(metadata={'''help''': '''Should contain the data files for the task.'''} )
    max_seq_length : int = field(
        default=1_2_8 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    overwrite_cache : bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def main( ) -> Optional[Any]:
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s" , training_args )
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , "eval_results.txt" )
        if trainer.is_world_master():
            with open(output_eval_file , "w" ) as writer:
                logger.info("***** Eval results *****" )
                for key, value in result.items():
                    logger.info("  %s = %s" , key , value )
                    writer.write("%s = %s\n" % (key, value) )
                results.update(result )
return results
def _mp_fn( index ) -> str:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 189 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')
modified_files = subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode('''utf-8''').split()
joined_dirs = '''|'''.join(sys.argv[1:])
regex = re.compile(RF"""^({joined_dirs}).*?\.py$""")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(''' '''.join(relevant_modified_files), end='''''') | 189 | 1 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
__snake_case : Tuple =logging.get_logger(__name__)
class SegformerFeatureExtractor ( SegformerImageProcessor ):
    '''simple docstring'''
    def __init__(self ,*args ,**kwargs ) -> None:
        """simple docstring"""
        warnings.warn(
            '''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use SegformerImageProcessor instead.''' ,FutureWarning ,)
        super().__init__(*args ,**kwargs )
| 360 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared( vector : ndarray):
    '''simple docstring'''
    return np.dot(vector ,vector)
class lowerCamelCase_ :
    '''simple docstring'''
    def __init__(self ,*, regularization = np.inf ,kernel = "linear" ,gamma = 0.0 ,) -> None:
        """simple docstring"""
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError('''rbf kernel requires gamma''' )
            if not isinstance(self.gamma ,(float, int) ):
                raise ValueError('''gamma must be float or int''' )
            if not self.gamma > 0:
                raise ValueError('''gamma must be > 0''' )
            self.kernel = self.__rbf
        # in the future, there could be a default value like in sklearn
        # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
        # previously it was 1/(n_features)
        else:
            msg = f"""Unknown kernel: {kernel}"""
            raise ValueError(msg )
    def __linear(self ,vectora ,vectorb ) -> float:
        """simple docstring"""
        return np.dot(vectora ,vectorb )
    def __rbf(self ,vectora ,vectorb ) -> float:
        """simple docstring"""
        return np.exp(-(self.gamma * norm_squared(vectora - vectorb )) )
    def fit(self ,observations ,classes ) -> None:
        """simple docstring"""
        self.observations = observations
        self.classes = classes
        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n ,) = np.shape(classes )
        def to_minimize(candidate ) -> float:
            s = 0
            (n ,) = np.shape(candidate )
            for i in range(n ):
                for j in range(n ):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i] ,observations[j] )
                    )
            return 1 / 2 * s - sum(candidate )
        ly_contraint = LinearConstraint(classes ,0 ,0 )
        l_bounds = Bounds(0 ,self.regularization )
        l_star = minimize(
            to_minimize ,np.ones(n ) ,bounds=l_bounds ,constraints=[ly_contraint] ).x
        self.optimum = l_star
        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n ):
            for j in range(n ):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i] ,observations[j] )
        self.offset = s / n
    def predict(self ,observation ) -> int:
        """simple docstring"""
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n] ,observation )
            for n in range(len(self.classes ) ) )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
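    # Minimal usage sketch (added; not part of the original file) on a tiny linearly
    # separable toy set; assumes scipy's default constrained solver converges here.
    toy_x = [np.asarray([-2.0] ), np.asarray([-1.0] ), np.asarray([1.0] ), np.asarray([2.0] )]
    toy_y = np.asarray([1, 1, -1, -1] )
    svc = lowerCamelCase_(kernel='''linear''' ,regularization=10 )  # the SVC-style class above
    svc.fit(observations=toy_x ,classes=toy_y )
    print(svc.predict(np.asarray([-1.5] ) ) )  # expected: 1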
| 94 | 0 |
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args( ):
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine" )
    parser.add_argument("--num_epochs", type=int, default=5 )
    parser.add_argument("--batch_size", type=int, default=6 )
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1 )
    parser.add_argument("--freeze", type=bool, default=True )
    parser.add_argument("--learning_rate", type=float, default=5E-4 )
    parser.add_argument("--seed", type=int, default=0 )
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine" )
    parser.add_argument("--num_warmup_steps", type=int, default=10 )
    parser.add_argument("--weight_decay", type=float, default=0.01 )
    parser.add_argument("--output_dir", type=str, default="./results" )
    return parser.parse_args()
metric = load('accuracy')
def compute_metrics( eval_pred ):
    '''simple docstring'''
    predictions , labels = eval_pred
    predictions = np.argmax(predictions , axis=1 )
    return metric.compute(predictions=predictions , references=labels )
class CustomCallback (TrainerCallback ):
    """simple docstring"""
    def __init__( self : Union[str, Any] , trainer : Union[str, Any] ) -> None:
        super().__init__()
        self._trainer = trainer
    def on_epoch_end( self : Optional[Any] , args : Union[str, Any] , state : Dict , control : int , **kwargs : int ) -> Any:
        if control.should_evaluate:
            control_copy = deepcopy(control )
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" )
            return control_copy
def main( ):
    '''simple docstring'''
    args = get_args()
    set_seed(args.seed )
    dataset = load_dataset("codeparrot/codecomplex", split="train" )
    train_test = dataset.train_test_split(test_size=0.2 )
    test_validation = train_test["test"].train_test_split(test_size=0.5 )
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        } )
    print("Loading tokenizer and model" )
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7 )
    model.config.pad_token_id = model.config.eos_token_id
    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False
    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"] ) ) )
    def tokenize(example ):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024 )
        label = labels.str2int(example["complexity"] )
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }
    tokenized_datasets = train_test_validation.map(
        tokenize, batched=True, remove_columns=train_test_validation["train"].column_names, )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer )
    training_args = TrainingArguments(
        output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy="epoch", save_strategy="epoch", logging_strategy="epoch", per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model="accuracy", run_name="complexity-java", report_to="wandb", )
    trainer = Trainer(
        model=model, args=training_args, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["valid"], tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, )
    print("Training..." )
    trainer.add_callback(CustomCallback(trainer ) )
    trainer.train()
if __name__ == "__main__":
main()
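# Example invocation (added for illustration; the filename is hypothetical, the
# flags are the ones defined in get_args above):
#   python train_complexity_predictor.py --model_ckpt microsoft/unixcoder-base-nine --num_epochs 5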
| 107 | """simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function( z ) -> Tuple:
    """simple docstring"""
    return 1 / (1 + np.exp(-z ))
def cost_function( h , y ) -> Dict:
    """simple docstring"""
    return (-y * np.log(h ) - (1 - y) * np.log(1 - h )).mean()
def log_likelihood( x , y , weights ) -> List[Any]:
    """simple docstring"""
    scores = np.dot(x , weights )
    return np.sum(y * scores - np.log(1 + np.exp(scores ) ) )
def logistic_reg( alpha , x , y , max_iterations=7_0_0_0_0 ) -> Optional[int]:
    """simple docstring"""
    theta = np.zeros(x.shape[1] )
    for iterations in range(max_iterations ):
        z = np.dot(x , theta )
        h = sigmoid_function(z )
        gradient = np.dot(x.T , h - y ) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x , theta )
        h = sigmoid_function(z )
        j = cost_function(h , y )
        if iterations % 1_0_0 == 0:
            print(f"""loss: {j} \t""" )  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
    print("theta: ", theta)  # printing the theta i.e our weights vector
    def predict_prob(x ) -> List[Any]:
        """simple docstring"""
        return sigmoid_function(
            np.dot(x , theta ) )  # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (xa_min, xa_max) = (x[:, 0].min(), x[:, 0].max())
    (xb_min, xb_max) = (x[:, 1].min(), x[:, 1].max())
    (xxa, xxb) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xb_min, xb_max))
    grid = np.c_[xxa.ravel(), xxb.ravel()]
    probs = predict_prob(grid).reshape(xxa.shape)
    plt.contour(xxa, xxb, probs, [0.5], linewidths=1, colors="black")
plt.legend()
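    # Quick numeric sanity check (added for illustration): the sigmoid is 0.5 at z = 0,
    # and the fitted theta scores new points directly.
    print("sigmoid(0) =", sigmoid_function(0))  # 0.5
    print("p(y=1 | first sample) =", predict_prob(x[:1])[0])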
plt.show()
| 150 | 0 |
"""simple docstring"""
from copy import deepcopy
class snake_case :
    def __init__( self : int , arr : list[int] | None = None , size : int | None = None ):
        '''simple docstring'''
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr )
        else:
            raise ValueError('Either arr or size must be specified' )
    def init( self : Dict , arr : list[int] ):
        '''simple docstring'''
        self.size = len(arr )
        self.tree = deepcopy(arr )
        for i in range(1 , self.size ):
            j = self.next_(i )
            if j < self.size:
                self.tree[j] += self.tree[i]
    def get_array( self : Dict ):
        '''simple docstring'''
        arr = self.tree[:]
        for i in range(self.size - 1 , 0 , -1 ):
            j = self.next_(i )
            if j < self.size:
                arr[j] -= arr[i]
        return arr
    @staticmethod
    def next_( index : int ):
        '''simple docstring'''
        return index + (index & (-index))
    @staticmethod
    def prev( index : int ):
        '''simple docstring'''
        return index - (index & (-index))
    def add( self : Tuple , index : int , value : int ):
        '''simple docstring'''
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index )
    def update( self : int , index : int , value : int ):
        '''simple docstring'''
        self.add(index , value - self.get(index ) )
    def prefix( self : Dict , right : int ):
        '''simple docstring'''
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right )
        return result
    def query( self : Optional[Any] , left : int , right : int ):
        '''simple docstring'''
        return self.prefix(right ) - self.prefix(left )
    def get( self : List[str] , index : int ):
        '''simple docstring'''
        return self.query(index , index + 1 )
    def rank_query( self : Dict , value : int ):
        '''simple docstring'''
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
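    # Small usage sketch (added; not in the original file): the class above is a
    # Fenwick / binary indexed tree over prefix sums.
    bit = snake_case(arr=[1, 2, 3, 4] )
    print(bit.prefix(3 ) )     # 1 + 2 + 3 = 6
    bit.add(2 , 10 )           # logically arr becomes [1, 2, 13, 4]
    print(bit.query(1 , 4 ) )  # 2 + 13 + 4 = 19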
| 355 |
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ['''XLA_PYTHON_CLIENT_ALLOCATOR'''] = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester :
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = '''gelu'''
    def __init__( self : int , parent : Optional[int] , batch_size : Dict=1_3 , seq_length : Tuple=7 , is_training : Union[str, Any]=True , use_labels : Union[str, Any]=False , vocab_size : int=9_9 , hidden_size : Any=3_2 , num_hidden_layers : str=5 , num_attention_heads : Optional[int]=4 , intermediate_size : List[Any]=3_7 , hidden_dropout_prob : Optional[Any]=0.1 , attention_probs_dropout_prob : Tuple=0.1 , max_position_embeddings : List[Any]=2_0 , eos_token_id : Optional[int]=2 , pad_token_id : Dict=1 , bos_token_id : List[Any]=0 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self : Union[str, Any] ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = np.concatenate([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_use_cache_forward( self : Optional[Any] , model_class_name : List[Any] , config : Optional[Any] , inputs_dict : Dict ):
        '''simple docstring'''
        max_decoder_length = 2_0
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict['input_ids'] )
        decoder_input_ids , decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
    def check_use_cache_forward_with_attn_mask( self : Optional[int] , model_class_name : Tuple , config : str , inputs_dict : str ):
        '''simple docstring'''
        max_decoder_length = 2_0
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict['input_ids'] )
        decoder_input_ids , decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def prepare_pegasus_inputs_dict(config : List[Any] , input_ids : Tuple , decoder_input_ids : List[str] , attention_mask : List[Any]=None , decoder_attention_mask : Any=None , ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids , config.pad_token_id ).astype(np.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape , dtype=np.int8 ),
                np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.int8 ),
            ] , axis=-1 , )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp( self : int ):
        '''simple docstring'''
        self.model_tester = FlaxPegasusModelTester(self )
        self.config_tester = ConfigTester(self , config_class=PegasusConfig )
    def test_config( self : str ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_use_cache_forward( self : Any ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )
    def test_use_cache_forward_with_attn_mask( self : Optional[int] ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
    def test_encode( self : Union[str, Any] ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def encode_jitted(input_ids : str , attention_mask : List[Any]=None , **kwargs : str ):
                    return model.encode(input_ids=input_ids , attention_mask=attention_mask )
                with self.subTest('JIT Enabled' ):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def test_decode( self : List[str] ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                model = model_class(config )
                encoder_outputs = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
                prepared_inputs_dict = {
                    'decoder_input_ids': inputs_dict['decoder_input_ids'],
                    'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
                    'encoder_outputs': encoder_outputs,
                }
                @jax.jit
                def decode_jitted(decoder_input_ids : Optional[int] , decoder_attention_mask : Tuple , encoder_outputs : Dict ):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )
                with self.subTest('JIT Enabled' ):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
@slow
    def test_model_from_pretrained( self : Union[str, Any] ):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('google/pegasus-large' , from_pt=True )
            input_ids = np.ones((1, 1) )
            outputs = model(input_ids )
            self.assertIsNotNone(outputs )
@slow
    def test_pegasus_xsum_summary( self : Any ):
        '''simple docstring'''
        model = FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum' )
        tokenizer = PegasusTokenizer.from_pretrained('google/pegasus-xsum' )
        src_text = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
        tgt_text = [
'California\'s largest electricity provider has turned off power to hundreds of thousands of customers.',
'Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.',
]
        inputs = tokenizer(src_text , return_tensors='np' , truncation=True , max_length=5_1_2 , padding=True )
        translated_tokens = model.generate(**inputs , num_beams=2 ).sequences
        decoded = tokenizer.batch_decode(translated_tokens , skip_special_tokens=True )
        assert tgt_text == decoded
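# Note (added): these JIT and cache-equivalence tests are collected by pytest, e.g.
#   python -m pytest tests/models/pegasus/test_modeling_flax_pegasus.py -k cache
# (the path reflects the usual transformers layout and is an assumption here).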
| 186 | 0 |
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = """tokenizer_file"""
    special_tokens_map = {"""bos_token""": """<s>""", """eos_token""": """</s>""", """unk_token""": """<unk>""", """pad_token""": """<pad>"""}
    def setUp( self : Optional[Any] ):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("""bigscience/tokenizer""" )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_rust_tokenizer( self : str , **kwargs : Dict ):
        kwargs.update(self.special_tokens_map )
        return BloomTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def test_encodings_from_sample_data( self : Any ):
        tokenizer = self.get_rust_tokenizer()
        INPUT_SENTENCES = ["""The quick brown fox</s>""", """jumps over the lazy dog</s>"""]
        TARGET_TOKENS = [[2_175, 23_714, 73_173, 144_252, 2], [77, 132_619, 3_478, 368, 109_586, 35_433, 2]]
        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES )["""input_ids"""]
        self.assertListEqual(TARGET_TOKENS , computed_tokens )
        decoded_tokens = tokenizer.batch_decode(computed_tokens )
        self.assertListEqual(decoded_tokens , INPUT_SENTENCES )
    def test_padding( self : Optional[Any] , max_length : Optional[Any]=6 ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = """This is a simple input"""
                sa = ["""This is a simple input 1""", """This is a simple input 2"""]
                p = ("""This is a simple input""", """This is a pair""")
                pa = [
                    ("""This is a simple input 1""", """This is a simple input 2"""),
                    ("""This is a simple pair 1""", """This is a simple pair 2"""),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(s , max_length=max_length )
                    tokenizer_r.encode_plus(s , max_length=max_length )
                    tokenizer_r.batch_encode_plus(sa , max_length=max_length )
                    tokenizer_r.encode(p , max_length=max_length )
                    tokenizer_r.batch_encode_plus(pa , max_length=max_length )
                except ValueError:
                    self.fail("""Bloom Tokenizer should be able to deal with padding""" )
                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding="""max_length""" )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding="""max_length""" )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , sa , max_length=max_length , padding="""max_length""" , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding="""max_length""" )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding="""max_length""" )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , pa , max_length=max_length , padding="""max_length""" , )
    def test_encodings_from_xnli_dataset( self : List[Any] ):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("""xnli""" , """all_languages""" , split="""test""" , streaming=True )
        sample_data = next(iter(ds ) )["""premise"""]  # pick up one data
        list_of_strings = list(sample_data.values() )
        output_tokens = list(map(tokenizer.encode , list_of_strings ) )
        predicted_text = [tokenizer.decode(x , clean_up_tokenization_spaces=False ) for x in output_tokens]
        self.assertListEqual(predicted_text , list_of_strings )
    def test_pretrained_model_lists( self : Any ):
# The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have
# any sequence length constraints. This test of the parent class will fail since it relies on the
# maximum sequence length of the positoonal embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 111 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
    def check_results_dict_not_empty( self : Any , results : Union[str, Any] ):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
                result = model_result["""result"""][batch_size][sequence_length]
                self.assertIsNotNone(result )
    def test_inference_no_configs_eager( self : List[Any] ):
        MODEL_ID = """sshleifer/tiny-gpt2"""
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=True , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_no_configs_only_pretrain( self : Optional[int] ):
        MODEL_ID = """sgugger/tiny-distilbert-classification"""
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , only_pretrain_model=True , )
        benchmark = TensorFlowBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_no_configs_graph( self : int ):
        MODEL_ID = """sshleifer/tiny-gpt2"""
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_with_configs_eager( self : List[Any] ):
        MODEL_ID = """sshleifer/tiny-gpt2"""
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=True , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args , [config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_with_configs_graph( self : Any ):
        MODEL_ID = """sshleifer/tiny-gpt2"""
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args , [config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_train_no_configs( self : Any ):
        MODEL_ID = """sshleifer/tiny-gpt2"""
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def test_train_with_configs( self : int ):
        MODEL_ID = """sshleifer/tiny-gpt2"""
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args , [config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def test_inference_encoder_decoder_with_configs( self : List[str] ):
        MODEL_ID = """patrickvonplaten/t5-tiny-random"""
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , """Cannot do xla on CPU.""" )
    def test_inference_no_configs_xla( self : Optional[Any] ):
        MODEL_ID = """sshleifer/tiny-gpt2"""
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , use_xla=True , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_save_csv_files( self : Union[str, Any] ):
        MODEL_ID = """sshleifer/tiny-gpt2"""
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID] , inference=True , save_to_csv=True , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(tmp_dir , """inf_time.csv""" ) , inference_memory_csv_file=os.path.join(tmp_dir , """inf_mem.csv""" ) , env_info_csv_file=os.path.join(tmp_dir , """env.csv""" ) , multi_process=False , )
            benchmark = TensorFlowBenchmark(benchmark_args )
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir , """inf_time.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir , """inf_mem.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir , """env.csv""" ) ).exists() )
    def test_trace_memory( self : Optional[int] ):
        MODEL_ID = """sshleifer/tiny-gpt2"""
        def _check_summary_is_not_empty(summary : Union[str, Any] ):
            self.assertTrue(hasattr(summary , """sequential""" ) )
            self.assertTrue(hasattr(summary , """cumulative""" ) )
            self.assertTrue(hasattr(summary , """current""" ) )
            self.assertTrue(hasattr(summary , """total""" ) )
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID] , inference=True , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(tmp_dir , """log.txt""" ) , log_print=True , trace_memory_line_by_line=True , eager_mode=True , multi_process=False , )
            benchmark = TensorFlowBenchmark(benchmark_args )
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            self.assertTrue(Path(os.path.join(tmp_dir , """log.txt""" ) ).exists() )
| 111 | 1 |
"""simple docstring"""
def print_pascal_triangle( num_rows :int) -> None:
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=''' ''')
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx] , end=''' ''')
            else:
                print(triangle[row_idx][col_idx] , end='''''')
        print()
def generate_pascal_triangle( num_rows :int) -> list[list[int]]:
    if not isinstance(num_rows , int):
        raise TypeError('''The input value of \'num_rows\' should be \'int\'''')
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            '''The input value of \'num_rows\' should be greater than or equal to 0''')
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle , current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row( triangle :list[list[int]] , current_row_idx :int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1 , current_row_idx):
        calculate_current_element(
            triangle , current_row , current_row_idx , current_col_idx)
    return current_row
def calculate_current_element( triangle :list[list[int]] , current_row :list[int] , current_row_idx :int , current_col_idx :int , ) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized( num_rows :int) -> list[list[int]]:
    if not isinstance(num_rows , int):
        raise TypeError('''The input value of \'num_rows\' should be \'int\'''')
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            '''The input value of \'num_rows\' should be greater than or equal to 0''')
    result: list[list[int]] = [[1]]
    for row_index in range(1 , num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length , 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result
def benchmark( ) -> None:
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func :Callable , value :int) -> None:
        call = F"""{func.__name__}({value})"""
        timing = timeit(F"""__main__.{call}""" , setup='''import __main__''')
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(F"""{call:38} -- {timing:.4f} seconds""")
    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func , value)
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
    benchmark()
 | 188 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester( unittest.TestCase ):
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=224 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = ViTImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_proc_tester.prepare_image_processor_dict()
    def test_image_proc_properties( self ):
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processor , '''image_std''' ) )
        self.assertTrue(hasattr(image_processor , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processor , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processor , '''size''' ) )
    def test_batch_feature( self ):
        pass
    def test_call_pil( self ):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processor(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ) , )
    def test_call_numpy( self ):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processor(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ) , )
    def test_call_pytorch( self ):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processor(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ) , )
 | 188 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class LayoutLMv3Config( PretrainedConfig ):
    """simple docstring"""
    model_type = """layoutlmv3"""
    def __init__( self , vocab_size=50265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-5 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_2d_position_embeddings=1024 , coordinate_size=128 , shape_size=128 , has_relative_attention_bias=True , rel_pos_bins=32 , max_rel_pos=128 , rel_2d_pos_bins=64 , max_rel_2d_pos=256 , has_spatial_attention_bias=True , text_embed=True , visual_embed=True , input_size=224 , num_channels=3 , patch_size=16 , classifier_dropout=None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_size=vocab_size , hidden_size=hidden_size , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , intermediate_size=intermediate_size , hidden_act=hidden_act , hidden_dropout_prob=hidden_dropout_prob , attention_probs_dropout_prob=attention_probs_dropout_prob , max_position_embeddings=max_position_embeddings , type_vocab_size=type_vocab_size , initializer_range=initializer_range , layer_norm_eps=layer_norm_eps , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig( OnnxConfig ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse('1.12' )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ] )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ] )
    @property
    def atol_for_validation( self ) -> float:
        """simple docstring"""
        return 1e-5
    @property
    def default_onnx_opset( self ) -> int:
        """simple docstring"""
        return 12
    def generate_dummy_inputs( self , processor : "ProcessorMixin" , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional["TensorType"] = None , num_channels : int = 3 , image_width : int = 40 , image_height : int = 40 , ) -> Mapping[str, Any]:
        """simple docstring"""
        setattr(processor.image_processor , "apply_ocr" , False )
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair )
        seq_length = compute_effective_axis_dimension(
            seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
        inputs = dict(
            processor(
                dummy_image , text=dummy_text , boxes=dummy_bboxes , return_tensors=framework , ) )
        return inputs
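        # Note: [48, 84, 73, 128] above is an arbitrary placeholder box; LayoutLMv3
        # expects one (x0, y0, x1, y1) bounding box per token, normalized to a 0-1000 scale.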
| 159 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class AutomaticSpeechRecognition( TaskTemplate ):
    task: str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
    input_schema: ClassVar[Features] = Features({"""audio""": Audio()} )
    label_schema: ClassVar[Features] = Features({"""transcription""": Value("""string""" )} )
    audio_column: str = "audio"
    transcription_column: str = "transcription"
    def align_with_features( self , features : Dict ):
        if self.audio_column not in features:
            raise ValueError(F"""Column {self.audio_column} is not present in features.""" )
        if not isinstance(features[self.audio_column] , Audio ):
            raise ValueError(F"""Column {self.audio_column} is not an Audio type.""" )
        task_template = copy.deepcopy(self )
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template
    @property
    def column_mapping( self ):
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
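    # Illustrative usage (a sketch, not part of the original file): for a dataset whose
    # features map "audio" to Audio() and "transcription" to Value("string"),
    # AutomaticSpeechRecognition().align_with_features(dataset_features) returns a copy
    # of the template whose input schema carries the dataset's own Audio feature
    # (e.g. its sampling rate).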
| 38 | 0 |
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
__A = logging.get_logger(__name__)
class SageMakerTrainer( Trainer ):
    def __init__( self , args=None , **kwargs ):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead." , FutureWarning , )
        super().__init__(args=args , **kwargs )
 | 64 |
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class EfficientNetConfig( PretrainedConfig ):
    model_type = "efficientnet"
    def __init__( self , num_channels : int = 3 , image_size : int = 600 , width_coefficient : float = 2.0 , depth_coefficient : float = 3.1 , depth_divisor : int = 8 , kernel_sizes : List[int] = [3, 3, 5, 3, 5, 5, 3] , in_channels : List[int] = [32, 16, 24, 40, 80, 112, 192] , out_channels : List[int] = [16, 24, 40, 80, 112, 192, 320] , depthwise_padding : List[int] = [] , strides : List[int] = [1, 2, 2, 2, 1, 2, 1] , num_block_repeats : List[int] = [1, 2, 2, 3, 3, 4, 1] , expand_ratios : List[int] = [1, 6, 6, 6, 6, 6, 6] , squeeze_expansion_ratio : float = 0.25 , hidden_act : str = "swish" , hidden_dim : int = 2560 , pooling_type : str = "mean" , initializer_range : float = 0.02 , batch_norm_eps : float = 0.001 , batch_norm_momentum : float = 0.99 , dropout_rate : float = 0.5 , drop_connect_rate : float = 0.2 , **kwargs , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats ) * 4
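        # Context: width_coefficient / depth_coefficient implement EfficientNet's
        # compound scaling -- channel counts are scaled by the width coefficient
        # (rounded to a multiple of depth_divisor) and block repeats by the depth
        # coefficient. The defaults above (2.0, 3.1) correspond to EfficientNet-B7.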
class EfficientNetOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.11" )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        return 1e-5
 | 64 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest( unittest.TestCase ):
    def setUp( self ):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_image_processor_from_model_shortcut( self ):
        config = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
        self.assertIsInstance(config, CLIPImageProcessor )
    def test_image_processor_from_local_directory_from_key( self ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname ) / '''preprocessor_config.json'''
            config_tmpfile = Path(tmpdirname ) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''}, open(processor_tmpfile, '''w''' ), )
            json.dump({'''model_type''': '''clip'''}, open(config_tmpfile, '''w''' ) )
            config = AutoImageProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(config, CLIPImageProcessor )
def __magic_name__ ( self : Any ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Any = Path(__A ) / '''preprocessor_config.json'''
UpperCAmelCase : str = Path(__A ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''}, open(__A, '''w''' ), )
json.dump({'''model_type''': '''clip'''}, open(__A, '''w''' ) )
UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(__A )
self.assertIsInstance(__A, __A )
def __magic_name__ ( self : Any ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Any = CLIPConfig()
# Create a dummy config file with image_proceesor_type
UpperCAmelCase : List[str] = Path(__A ) / '''preprocessor_config.json'''
UpperCAmelCase : Optional[Any] = Path(__A ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''}, open(__A, '''w''' ), )
json.dump({'''model_type''': '''clip'''}, open(__A, '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
UpperCAmelCase : Dict = AutoImageProcessor.from_pretrained(__A ).to_dict()
config_dict.pop('''image_processor_type''' )
UpperCAmelCase : str = CLIPImageProcessor(**__A )
# save in new folder
model_config.save_pretrained(__A )
config.save_pretrained(__A )
UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(__A )
# make sure private variable is not incorrectly saved
UpperCAmelCase : int = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(__A, __A )
def __magic_name__ ( self : Optional[Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Tuple = Path(__A ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''}, open(__A, '''w''' ), )
UpperCAmelCase : List[Any] = AutoImageProcessor.from_pretrained(__A )
self.assertIsInstance(__A, __A )
def __magic_name__ ( self : Dict ):
with self.assertRaisesRegex(
__A, '''clip-base is not a local folder and is not a valid model identifier''' ):
UpperCAmelCase : str = AutoImageProcessor.from_pretrained('''clip-base''' )
def __magic_name__ ( self : List[str] ):
with self.assertRaisesRegex(
__A, R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
UpperCAmelCase : Optional[int] = AutoImageProcessor.from_pretrained(__A, revision='''aaaaaa''' )
def __magic_name__ ( self : Union[str, Any] ):
with self.assertRaisesRegex(
__A, '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''', ):
UpperCAmelCase : Optional[Any] = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def __magic_name__ ( self : Union[str, Any] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__A ):
UpperCAmelCase : Tuple = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__A ):
UpperCAmelCase : Any = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''', trust_remote_code=__A )
UpperCAmelCase : str = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''', trust_remote_code=__A )
self.assertEqual(image_processor.__class__.__name__, '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__A )
UpperCAmelCase : Optional[Any] = AutoImageProcessor.from_pretrained(__A, trust_remote_code=__A )
self.assertEqual(reloaded_image_processor.__class__.__name__, '''NewImageProcessor''' )
def __magic_name__ ( self : Any ):
try:
AutoConfig.register('''custom''', __A )
AutoImageProcessor.register(__A, __A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__A ):
AutoImageProcessor.register(__A, __A )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Dict = Path(__A ) / '''preprocessor_config.json'''
UpperCAmelCase : Any = Path(__A ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''}, open(__A, '''w''' ), )
json.dump({'''model_type''': '''clip'''}, open(__A, '''w''' ) )
UpperCAmelCase : Any = CustomImageProcessor.from_pretrained(__A )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__A )
UpperCAmelCase : int = AutoImageProcessor.from_pretrained(__A )
self.assertIsInstance(__A, __A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def __magic_name__ ( self : List[Any] ):
        class NewImageProcessor( CLIPImageProcessor ):
            is_local = True
try:
AutoConfig.register('''custom''', __A )
AutoImageProcessor.register(__A, __A )
# If remote code is not set, the default is to use local
UpperCAmelCase : int = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__, '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
UpperCAmelCase : Optional[int] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''', trust_remote_code=__A )
self.assertEqual(image_processor.__class__.__name__, '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
UpperCAmelCase : Any = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''', trust_remote_code=__A )
self.assertEqual(image_processor.__class__.__name__, '''NewImageProcessor''' )
self.assertTrue(not hasattr(__A, '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
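# Illustrative recap of the registration API exercised above (a sketch, using the
# CustomConfig / CustomImageProcessor helpers imported at the top of this file):
#   AutoConfig.register("custom", CustomConfig)
#   AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
#   processor = AutoImageProcessor.from_pretrained("<path-to-custom-checkpoint>")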
| 336 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipeline_utils import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
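# Net effect of this shim (for context): `from diffusers.pipeline_utils import
# DiffusionPipeline` keeps working for now, but emits the deprecation warning above
# pointing to `diffusers.pipelines.pipeline_utils` instead.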
| 336 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"A red cartoon frog, 4k\"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16
... )
>>> pipe.to(\"cuda\")
>>> init_image = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/frog.png\"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save(\"red_frog.png\")
```
"""
def downscale_height_and_width( height , width , scale_factor=8 ):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
def prepare_image( pil_image , w=512 , h=512 ):
    pil_image = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
    arr = np.array(pil_image.convert('''RGB''' ) )
    arr = arr.astype(np.float32 ) / 127.5 - 1
    arr = np.transpose(arr , [2, 0, 1] )
    image = torch.from_numpy(arr ).unsqueeze(0 )
    return image
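# Quick check of the helpers above (illustrative): with the default scale_factor=8,
# downscale_height_and_width(768, 768) returns (96, 96) -- each side is divided by
# scale_factor**2 (rounding up) and multiplied back by scale_factor; prepare_image
# maps a PIL image to a (1, 3, h, w) float tensor scaled to [-1, 1].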
class KandinskyV22Img2ImgPipeline( DiffusionPipeline ):
    '''simple docstring'''
    def __init__( self , unet , scheduler , movq , ):
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def get_timesteps( self , num_inference_steps , strength , device ):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength ) , num_inference_steps )
        t_start = max(num_inference_steps - init_timestep , 0 )
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None ) -> Any:
if not isinstance(__lowerCAmelCase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__lowerCAmelCase )}""" )
lowercase__ : Dict = image.to(device=__lowerCAmelCase , dtype=__lowerCAmelCase )
lowercase__ : Any = batch_size * num_images_per_prompt
if image.shape[1] == 4:
lowercase__ : List[Any] = image
else:
if isinstance(__lowerCAmelCase , __lowerCAmelCase ) and len(__lowerCAmelCase ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(__lowerCAmelCase )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowercase__ : Optional[int] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__lowerCAmelCase )
]
lowercase__ : Any = torch.cat(__lowerCAmelCase , dim=0 )
else:
lowercase__ : Dict = self.movq.encode(__lowerCAmelCase ).latent_dist.sample(__lowerCAmelCase )
lowercase__ : Union[str, Any] = self.movq.config.scaling_factor * init_latents
lowercase__ : Optional[int] = torch.cat([init_latents] , dim=0 )
lowercase__ : Dict = init_latents.shape
lowercase__ : List[str] = randn_tensor(__lowerCAmelCase , generator=__lowerCAmelCase , device=__lowerCAmelCase , dtype=__lowerCAmelCase )
# get latents
lowercase__ : Dict = self.scheduler.add_noise(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
lowercase__ : int = init_latents
return latents
def _lowerCAmelCase( self , __lowerCAmelCase=0 ) -> Tuple:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
lowercase__ : Tuple = torch.device(F"""cuda:{gpu_id}""" )
lowercase__ : Any = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__lowerCAmelCase , __lowerCAmelCase )
def _lowerCAmelCase( self , __lowerCAmelCase=0 ) -> Dict:
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
lowercase__ : int = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=__lowerCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase__ : Optional[int] = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase__ , lowercase__ : Optional[Any] = cpu_offload_with_hook(__lowerCAmelCase , __lowerCAmelCase , prev_module_hook=__lowerCAmelCase )
# We'll offload the last model manually.
lowercase__ : int = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowerCAmelCase( self ) -> Optional[int]:
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(__lowerCAmelCase , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
def __call__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 512 , __lowerCAmelCase = 512 , __lowerCAmelCase = 100 , __lowerCAmelCase = 4.0 , __lowerCAmelCase = 0.3 , __lowerCAmelCase = 1 , __lowerCAmelCase = None , __lowerCAmelCase = "pil" , __lowerCAmelCase = True , ) -> int:
lowercase__ : List[Any] = self._execution_device
lowercase__ : Tuple = guidance_scale > 1.0
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowercase__ : List[str] = torch.cat(__lowerCAmelCase , dim=0 )
lowercase__ : List[Any] = image_embeds.shape[0]
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowercase__ : int = torch.cat(__lowerCAmelCase , dim=0 )
if do_classifier_free_guidance:
lowercase__ : int = image_embeds.repeat_interleave(__lowerCAmelCase , dim=0 )
lowercase__ : int = negative_image_embeds.repeat_interleave(__lowerCAmelCase , dim=0 )
lowercase__ : Tuple = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__lowerCAmelCase )
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowercase__ : int = [image]
if not all(isinstance(__lowerCAmelCase , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(__lowerCAmelCase ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
lowercase__ : int = torch.cat([prepare_image(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) for i in image] , dim=0 )
lowercase__ : Optional[int] = image.to(dtype=image_embeds.dtype , device=__lowerCAmelCase )
lowercase__ : str = self.movq.encode(__lowerCAmelCase )['''latents''']
lowercase__ : str = latents.repeat_interleave(__lowerCAmelCase , dim=0 )
self.scheduler.set_timesteps(__lowerCAmelCase , device=__lowerCAmelCase )
lowercase__ , lowercase__ : int = self.get_timesteps(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
lowercase__ : Optional[Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
lowercase__ , lowercase__ : Tuple = downscale_height_and_width(__lowerCAmelCase , __lowerCAmelCase , self.movq_scale_factor )
lowercase__ : int = self.prepare_latents(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , image_embeds.dtype , __lowerCAmelCase , __lowerCAmelCase )
for i, t in enumerate(self.progress_bar(__lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ : str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ : Optional[Any] = {'''image_embeds''': image_embeds}
lowercase__ : Union[str, Any] = self.unet(
sample=__lowerCAmelCase , timestep=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , added_cond_kwargs=__lowerCAmelCase , return_dict=__lowerCAmelCase , )[0]
if do_classifier_free_guidance:
lowercase__ , lowercase__ : Any = noise_pred.split(latents.shape[1] , dim=1 )
lowercase__ , lowercase__ : List[Any] = noise_pred.chunk(2 )
lowercase__ , lowercase__ : Union[str, Any] = variance_pred.chunk(2 )
lowercase__ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase__ : Union[str, Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase__ , lowercase__ : Any = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase__ : int = self.scheduler.step(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase , )[0]
# post-processing
lowercase__ : Union[str, Any] = self.movq.decode(__lowerCAmelCase , force_not_quantize=__lowerCAmelCase )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowercase__ : List[Any] = image * 0.5 + 0.5
lowercase__ : List[str] = image.clamp(0 , 1 )
lowercase__ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase__ : Tuple = self.numpy_to_pil(__lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCAmelCase )
| 214 | '''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
    """tokenizer_file""": {
        """bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
        """bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
        """bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
        """bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
        """bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
        """bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
        """bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
    },
}
class BloomTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<unk>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , add_prefix_space=False , clean_up_tokenization_spaces=False , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , add_prefix_space=add_prefix_space , clean_up_tokenization_spaces=clean_up_tokenization_spaces , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
                ''' pretokenized inputs.''' )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
                ''' pretokenized inputs.''' )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def _build_conversation_input_ids( self , conversation : "Conversation" ) -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
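    # Context: with add_prefix_space=True the backend pre-tokenizer treats the first
    # word of a sequence like a mid-sentence word, which is why the two encode paths
    # above refuse pretokenized (is_split_into_words=True) input without it.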
| 214 | 1 |
"""simple docstring"""
class SubArray :
    def __init__( self , arr : str ):
        """simple docstring"""
        self.array = arr.split(''',''' )
    def solve_sub_array( self ):
        """simple docstring"""
        sum_value = [int(self.array[0] )] * len(self.array )
        rear = [int(self.array[0] )] * len(self.array )
        for i in range(1 , len(self.array ) ):
            sum_value[i] = max(
                int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
            rear[i] = max(sum_value[i] , rear[i - 1] )
        return rear[len(self.array ) - 1]
if __name__ == "__main__":
    whole_array = input('please input some numbers:')
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(('the results is:', re))
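# Quick check (illustrative): solve_sub_array() is Kadane's algorithm, so for the
# input "1,-2,3,4" the best contiguous subarray is [3, 4] and the result is 7.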
| 167 |
"""simple docstring"""
from manim import *
class ModelLoadingScene( Scene ):
    def construct( self ):
"""simple docstring"""
A_ : List[str] = Rectangle(height=0.5 , width=0.5 )
A_ : List[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
A_ : Tuple = [mem.copy() for i in range(6 )]
A_ : Optional[int] = [mem.copy() for i in range(6 )]
A_ : Optional[int] = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
A_ : Optional[int] = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
A_ : List[str] = VGroup(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
A_ : Dict = Text('''CPU''' , font_size=24 )
A_ : List[str] = Group(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0.5 , aligned_edge=_lowerCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowerCamelCase )
A_ : Optional[int] = [mem.copy() for i in range(1 )]
A_ : int = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
A_ : List[str] = Text('''GPU''' , font_size=24 )
A_ : List[str] = Group(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0.5 , aligned_edge=_lowerCamelCase )
gpu.align_to(_lowerCamelCase , _lowerCamelCase )
gpu.set_x(gpu.get_x() - 1 )
self.add(_lowerCamelCase )
A_ : List[Any] = [mem.copy() for i in range(6 )]
A_ : List[str] = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
A_ : Any = Text('''Model''' , font_size=24 )
A_ : Optional[int] = Group(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0.5 , aligned_edge=_lowerCamelCase )
model.move_to([3, -1.0, 0] )
self.play(
Create(_lowerCamelCase , run_time=1 ) , Create(_lowerCamelCase , run_time=1 ) , Create(_lowerCamelCase , run_time=1 ) , )
A_ : List[str] = MarkupText(
F"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=24 , )
A_ : Any = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
A_ : Dict = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCamelCase , run_time=2.5 ) , Write(_lowerCamelCase ) , Write(_lowerCamelCase ) )
self.add(_lowerCamelCase )
A_ : str = []
A_ : Any = []
A_ : Tuple = []
for i, rect in enumerate(_lowerCamelCase ):
A_ : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_lowerCamelCase , opacity=0.7 )
cpu_target.move_to(_lowerCamelCase )
cpu_target.generate_target()
A_ : List[str] = 0.46 / 4
A_ : List[Any] = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_lowerCamelCase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=_lowerCamelCase , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=_lowerCamelCase , buff=0.0 )
cpu_targs.append(_lowerCamelCase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_lowerCamelCase ) )
second_animations.append(MoveToTarget(_lowerCamelCase , run_time=1.5 ) )
self.play(*_lowerCamelCase )
self.play(*_lowerCamelCase )
self.wait()
| 167 | 1 |
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests( unittest.TestCase ):
    '''simple docstring'''
    def tearDown( self ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_image( self ):
        '''simple docstring'''
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
        return image
    @property
    def dummy_cond_unet( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        return model
    @property
    def dummy_vae( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        return model
    @property
    def dummy_text_encoder( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModel(config )
    @property
    def dummy_extractor( self ):
        '''simple docstring'''
        def extract(*args , **kwargs ):
            class Out:
                '''simple docstring'''
                def __init__( self ):
                    '''simple docstring'''
                    self.pixel_values = torch.ones([0] )
                def to( self , device ):
                    '''simple docstring'''
                    self.pixel_values.to(device )
                    return self
            return Out()
        return extract
    def test_safe_diffusion_ddim( self ):
        '''simple docstring'''
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet , scheduler=scheduler , vae=vae , text_encoder=bert , tokenizer=tokenizer , safety_checker=None , feature_extractor=self.dummy_extractor , )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = """A painting of a squirrel eating a burger"""
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
        image = output.images
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = sd_pipe(
            [prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def test_stable_diffusion_pndm( self ):
'''simple docstring'''
lowerCAmelCase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ = self.dummy_cond_unet
lowerCAmelCase_ = PNDMScheduler(skip_prk_steps=__UpperCAmelCase )
lowerCAmelCase_ = self.dummy_vae
lowerCAmelCase_ = self.dummy_text_encoder
lowerCAmelCase_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure here that pndm scheduler skips prk
lowerCAmelCase_ = StableDiffusionPipeline(
unet=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , safety_checker=__UpperCAmelCase , feature_extractor=self.dummy_extractor , )
lowerCAmelCase_ = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase_ = """A painting of a squirrel eating a burger"""
lowerCAmelCase_ = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
lowerCAmelCase_ = sd_pipe([prompt] , generator=__UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
lowerCAmelCase_ = output.images
lowerCAmelCase_ = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
lowerCAmelCase_ = sd_pipe(
[prompt] , generator=__UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=__UpperCAmelCase , )[0]
lowerCAmelCase_ = image[0, -3:, -3:, -1]
lowerCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCAmelCase_ = np.array([0.51_25, 0.57_16, 0.48_28, 0.50_60, 0.56_50, 0.47_68, 0.51_85, 0.48_95, 0.49_93] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def test_stable_diffusion_no_safety_checker( self ):
'''simple docstring'''
lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=__UpperCAmelCase )
assert isinstance(__UpperCAmelCase , __UpperCAmelCase )
assert isinstance(pipe.scheduler , __UpperCAmelCase )
assert pipe.safety_checker is None
lowerCAmelCase_ = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__UpperCAmelCase )
lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained(__UpperCAmelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
lowerCAmelCase_ = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
    def test_stable_diffusion_fp16( self ):
'''simple docstring'''
lowerCAmelCase_ = self.dummy_cond_unet
lowerCAmelCase_ = PNDMScheduler(skip_prk_steps=__UpperCAmelCase )
lowerCAmelCase_ = self.dummy_vae
lowerCAmelCase_ = self.dummy_text_encoder
lowerCAmelCase_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# put models in fp16
lowerCAmelCase_ = unet.half()
lowerCAmelCase_ = vae.half()
lowerCAmelCase_ = bert.half()
# make sure here that pndm scheduler skips prk
lowerCAmelCase_ = StableDiffusionPipeline(
unet=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , safety_checker=__UpperCAmelCase , feature_extractor=self.dummy_extractor , )
lowerCAmelCase_ = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase_ = """A painting of a squirrel eating a burger"""
lowerCAmelCase_ = sd_pipe([prompt] , num_inference_steps=2 , output_type='np' ).images
assert image.shape == (1, 6_4, 6_4, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests( unittest.TestCase ):
    '''simple docstring'''
    def tearDown( self ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_harm_safe_stable_diffusion( self ):
'''simple docstring'''
lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=__UpperCAmelCase )
lowerCAmelCase_ = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowerCAmelCase_ = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase_ = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
lowerCAmelCase_ = 4_0_0_3_6_6_0_3_4_6
lowerCAmelCase_ = 7
# without safety guidance (sld_guidance_scale = 0)
lowerCAmelCase_ = torch.manual_seed(__UpperCAmelCase )
lowerCAmelCase_ = sd_pipe(
[prompt] , generator=__UpperCAmelCase , guidance_scale=__UpperCAmelCase , num_inference_steps=5_0 , output_type='np' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
lowerCAmelCase_ = output.images
lowerCAmelCase_ = image[0, -3:, -3:, -1]
lowerCAmelCase_ = [0.22_78, 0.22_31, 0.22_49, 0.23_33, 0.23_03, 0.18_85, 0.22_73, 0.21_44, 0.21_76]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
# without safety guidance (strong configuration)
lowerCAmelCase_ = torch.manual_seed(__UpperCAmelCase )
lowerCAmelCase_ = sd_pipe(
[prompt] , generator=__UpperCAmelCase , guidance_scale=__UpperCAmelCase , num_inference_steps=5_0 , output_type='np' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCAmelCase_ = output.images
lowerCAmelCase_ = image[0, -3:, -3:, -1]
lowerCAmelCase_ = [0.23_83, 0.22_76, 0.2_36, 0.21_92, 0.21_86, 0.20_53, 0.19_71, 0.19_01, 0.17_19]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_nudity_safe_stable_diffusion( self ):
'''simple docstring'''
lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=__UpperCAmelCase )
lowerCAmelCase_ = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowerCAmelCase_ = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase_ = """padme amidala taking a bath artwork, safe for work, no nudity"""
lowerCAmelCase_ = 2_7_3_4_9_7_1_7_5_5
lowerCAmelCase_ = 7
lowerCAmelCase_ = torch.manual_seed(__UpperCAmelCase )
lowerCAmelCase_ = sd_pipe(
[prompt] , generator=__UpperCAmelCase , guidance_scale=__UpperCAmelCase , num_inference_steps=5_0 , output_type='np' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
lowerCAmelCase_ = output.images
lowerCAmelCase_ = image[0, -3:, -3:, -1]
lowerCAmelCase_ = [0.35_02, 0.36_22, 0.33_96, 0.36_42, 0.34_78, 0.33_18, 0.35, 0.33_48, 0.32_97]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
lowerCAmelCase_ = torch.manual_seed(__UpperCAmelCase )
lowerCAmelCase_ = sd_pipe(
[prompt] , generator=__UpperCAmelCase , guidance_scale=__UpperCAmelCase , num_inference_steps=5_0 , output_type='np' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCAmelCase_ = output.images
lowerCAmelCase_ = image[0, -3:, -3:, -1]
lowerCAmelCase_ = [0.55_31, 0.52_06, 0.48_95, 0.51_56, 0.51_82, 0.47_51, 0.48_02, 0.48_03, 0.44_43]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_nudity_safetychecker_safe_stable_diffusion( self ):
'''simple docstring'''
lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' )
lowerCAmelCase_ = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase_ = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
lowerCAmelCase_ = 1_0_4_4_3_5_5_2_3_4
lowerCAmelCase_ = 1_2
lowerCAmelCase_ = torch.manual_seed(__UpperCAmelCase )
lowerCAmelCase_ = sd_pipe(
[prompt] , generator=__UpperCAmelCase , guidance_scale=__UpperCAmelCase , num_inference_steps=5_0 , output_type='np' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
lowerCAmelCase_ = output.images
lowerCAmelCase_ = image[0, -3:, -3:, -1]
lowerCAmelCase_ = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
lowerCAmelCase_ = torch.manual_seed(__UpperCAmelCase )
lowerCAmelCase_ = sd_pipe(
[prompt] , generator=__UpperCAmelCase , guidance_scale=__UpperCAmelCase , num_inference_steps=5_0 , output_type='np' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCAmelCase_ = output.images
lowerCAmelCase_ = image[0, -3:, -3:, -1]
lowerCAmelCase_ = np.array([0.58_18, 0.62_85, 0.68_35, 0.60_19, 0.6_25, 0.67_54, 0.60_96, 0.63_34, 0.65_61] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
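        # Context for the nightly tests above: sld_guidance_scale=0 disables the
        # safe-latent-diffusion guidance entirely (plain Stable Diffusion behaviour),
        # while the stronger settings (scale 2000, warmup 7, threshold 0.025) steer
        # the latents away from unsafe concepts, which is why the expected slices differ.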
| 365 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    '''simple docstring'''
    destination_vertex: int
    weight: int
class AdjacencyList:
    '''simple docstring'''
    def __init__( self , size ):
        '''simple docstring'''
        self._graph: list[list[Edge]] = [[] for _ in range(size )]
        self._size = size
    def __getitem__( self , vertex ) -> Iterator[Edge]:
        '''simple docstring'''
        return iter(self._graph[vertex] )
    @property
    def size( self ):
        '''simple docstring'''
        return self._size
    def add_edge( self , from_vertex , to_vertex , weight ) -> None:
        '''simple docstring'''
        if weight not in (0, 1):
            raise ValueError('Edge weight must be either 0 or 1.' )
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('Vertex indexes must be in [0; size).' )
        self._graph[from_vertex].append(Edge(to_vertex , weight ) )
    def get_shortest_path( self , start_vertex , finish_vertex ) -> int | None:
        '''simple docstring'''
        queue = deque([start_vertex] )
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance , int )
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # 0-1 BFS: relaxing over a 0-weight edge keeps the distance, so the
                # vertex goes to the front of the deque; 1-weight edges go to the back.
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex )
                else:
                    queue.append(edge.destination_vertex )
        if distances[finish_vertex] is None:
            raise ValueError('No path from start_vertex to finish_vertex.' )
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
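# A small illustrative sanity check for the 0-1 BFS above; this graph and the
# expected distance are ours, not part of the original module.
if __name__ == "__main__":
    g = AdjacencyList(4 )
    g.add_edge(0 , 1 , 0 )
    g.add_edge(1 , 2 , 1 )
    g.add_edge(0 , 2 , 1 )
    g.add_edge(2 , 3 , 0 )
    # zero-weight edges go to the front of the deque, so the cheapest route
    # 0 -> 1 -> 2 -> 3 is found with total weight 1
    assert g.get_shortest_path(0 , 3 ) == 1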
| 14 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : Dict = logging.get_logger(__name__)
lowercase : Union[str, Any] = {
"google/vivit-b-16x2-kinetics400": (
"https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig( PretrainedConfig ):
model_type = """vivit"""
def __init__( self , image_size=2_24 , num_frames=32 , tubelet_size=[2, 16, 16] , num_channels=3 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu_fast" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-06 , qkv_bias=True , **kwargs , ):
"""simple docstring"""
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.num_frames = num_frames
self.tubelet_size = tubelet_size
self.num_channels = num_channels
self.qkv_bias = qkv_bias
super().__init__(**kwargs )
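# Illustrative only: instantiating the configuration above (it mirrors
# transformers' VivitConfig) reproduces the documented defaults.
config = VivitConfig()
assert config.hidden_size == 7_68
assert config.num_frames == 32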
| 42 |
from manim import *
class A ( UpperCAmelCase_ ):
def lowercase_ (self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCAmelCase__ = Rectangle(height=0.25 , width=0.25 )
UpperCAmelCase__ = [mem.copy() for i in range(6 )]
UpperCAmelCase__ = [mem.copy() for i in range(6 )]
UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCAmelCase__ = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCAmelCase__ = Text("CPU" , font_size=2_4 )
UpperCAmelCase__ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCAmelCase )
UpperCAmelCase__ = [mem.copy() for i in range(4 )]
UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCAmelCase__ = Text("GPU" , font_size=2_4 )
UpperCAmelCase__ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(__UpperCAmelCase )
UpperCAmelCase__ = [mem.copy() for i in range(6 )]
UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCAmelCase__ = Text("Model" , font_size=2_4 )
UpperCAmelCase__ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(__UpperCAmelCase )
UpperCAmelCase__ = []
UpperCAmelCase__ = []
for i, rect in enumerate(__UpperCAmelCase ):
UpperCAmelCase__ = fill.copy().set_fill(__UpperCAmelCase , opacity=0.8 )
target.move_to(__UpperCAmelCase )
model_arr.append(__UpperCAmelCase )
UpperCAmelCase__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__UpperCAmelCase )
self.add(*__UpperCAmelCase , *__UpperCAmelCase )
UpperCAmelCase__ = [meta_mem.copy() for i in range(6 )]
UpperCAmelCase__ = [meta_mem.copy() for i in range(6 )]
UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCAmelCase__ = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCAmelCase__ = Text("Disk" , font_size=2_4 )
UpperCAmelCase__ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
disk.move_to([-4, -1.25, 0] )
self.add(__UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase__ = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase__ = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , )
blue_text.next_to(__UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__UpperCAmelCase )
UpperCAmelCase__ = MarkupText(
f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCAmelCase ) )
UpperCAmelCase__ = Square(0.3 )
input.set_fill(__UpperCAmelCase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , __UpperCAmelCase , buff=0.5 )
self.play(Write(__UpperCAmelCase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=__UpperCAmelCase , buff=0.02 )
self.play(MoveToTarget(__UpperCAmelCase ) )
self.play(FadeOut(__UpperCAmelCase ) )
UpperCAmelCase__ = Arrow(start=__UpperCAmelCase , end=__UpperCAmelCase , color=__UpperCAmelCase , buff=0.5 )
a.next_to(model_arr[0].get_left() , __UpperCAmelCase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
UpperCAmelCase__ = MarkupText(
f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCAmelCase , run_time=3 ) )
UpperCAmelCase__ = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}
self.play(
Write(__UpperCAmelCase ) , Circumscribe(model_arr[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
UpperCAmelCase__ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , __UpperCAmelCase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
UpperCAmelCase__ = AnimationGroup(
FadeOut(__UpperCAmelCase , run_time=0.5 ) , MoveToTarget(__UpperCAmelCase , run_time=0.5 ) , FadeIn(__UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(__UpperCAmelCase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
UpperCAmelCase__ = 0.7
self.play(
Circumscribe(model_arr[i] , **__UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **__UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=__UpperCAmelCase , **__UpperCAmelCase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
UpperCAmelCase__ = a_c
UpperCAmelCase__ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(__UpperCAmelCase ) , FadeOut(__UpperCAmelCase , run_time=0.5 ) , )
UpperCAmelCase__ = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCAmelCase , run_time=3 ) , MoveToTarget(__UpperCAmelCase ) )
self.wait()
| 65 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name( class_name ):
'''simple docstring'''
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
module_name = model_type_to_module_name(module_name )
module = importlib.import_module(F'''.{module_name}''' , '''transformers.models''' )
try:
return getattr(module , class_name )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(extractor , '''__name__''' , None ) == class_name:
return extractor
# We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
main_module = importlib.import_module('''transformers''' )
if hasattr(main_module , class_name ):
return getattr(main_module , class_name )
return None
def get_image_processor_config( pretrained_model_name_or_path , cache_dir = None , force_download = False , resume_download = False , proxies = None , use_auth_token = None , revision = None , local_files_only = False , **kwargs , ):
'''simple docstring'''
resolved_config_file = get_file_from_repo(
pretrained_model_name_or_path , IMAGE_PROCESSOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
if resolved_config_file is None:
logger.info(
'''Could not locate the image processor configuration file, will try to use the model config instead.''' )
return {}
with open(resolved_config_file , encoding='''utf-8''' ) as reader:
return json.load(reader )
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : List[Any] ) -> int:
raise EnvironmentError(
'''AutoImageProcessor is designed to be instantiated '''
'''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(__lowerCamelCase )
def lowercase_ ( cls : Optional[int] , __lowerCamelCase : Any , **__lowerCamelCase : Tuple ) -> List[str]:
SCREAMING_SNAKE_CASE__ = kwargs.pop('''config''' , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = kwargs.pop('''trust_remote_code''' , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = ImageProcessingMixin.get_image_processor_dict(__lowerCamelCase , **__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = config_dict.get('''image_processor_type''' , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = None
if "AutoImageProcessor" in config_dict.get('''auto_map''' , {} ):
SCREAMING_SNAKE_CASE__ = config_dict['''auto_map''']['''AutoImageProcessor''']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
SCREAMING_SNAKE_CASE__ = config_dict.pop('''feature_extractor_type''' , __lowerCamelCase )
if feature_extractor_class is not None:
logger.warning(
'''Could not find image processor class in the image processor config or the model config. Loading'''
''' based on pattern matching with the model\'s feature extractor configuration.''' )
SCREAMING_SNAKE_CASE__ = feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''' )
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
SCREAMING_SNAKE_CASE__ = config_dict['''auto_map''']['''AutoFeatureExtractor''']
SCREAMING_SNAKE_CASE__ = feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''' )
logger.warning(
'''Could not find image processor auto map in the image processor config or the model config.'''
''' Loading based on pattern matching with the model\'s feature extractor configuration.''' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
# It could be in `config.image_processor_type``
SCREAMING_SNAKE_CASE__ = getattr(__lowerCamelCase , '''image_processor_type''' , __lowerCamelCase )
if hasattr(__lowerCamelCase , '''auto_map''' ) and "AutoImageProcessor" in config.auto_map:
SCREAMING_SNAKE_CASE__ = config.auto_map['''AutoImageProcessor''']
if image_processor_class is not None:
SCREAMING_SNAKE_CASE__ = image_processor_class_from_name(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = image_processor_auto_map is not None
SCREAMING_SNAKE_CASE__ = image_processor_class is not None or type(__lowerCamelCase ) in IMAGE_PROCESSOR_MAPPING
SCREAMING_SNAKE_CASE__ = resolve_trust_remote_code(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if has_remote_code and trust_remote_code:
SCREAMING_SNAKE_CASE__ = get_class_from_dynamic_module(
__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = kwargs.pop('''code_revision''' , __lowerCamelCase )
if os.path.isdir(__lowerCamelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(__lowerCamelCase , **__lowerCamelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(__lowerCamelCase , **__lowerCamelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(__lowerCamelCase ) in IMAGE_PROCESSOR_MAPPING:
SCREAMING_SNAKE_CASE__ = IMAGE_PROCESSOR_MAPPING[type(__lowerCamelCase )]
return image_processor_class.from_dict(__lowerCamelCase , **__lowerCamelCase )
raise ValueError(
f'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
f'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
f'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def lowercase_ ( __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] ) -> str:
IMAGE_PROCESSOR_MAPPING.register(__lowerCamelCase , __lowerCamelCase )
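# Hedged usage sketch (not part of the module above): AutoImageProcessor resolves
# a checkpoint's model_type through the mapping, so a ViT checkpoint yields a
# ViTImageProcessor instance.
from transformers import AutoImageProcessor

image_processor = AutoImageProcessor.from_pretrained('google/vit-base-patch16-224' )
print(type(image_processor ).__name__ )  # ViTImageProcessor, per the mapping above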
| 218 |
from __future__ import annotations
def peak( lst ):
'''simple docstring'''
m = len(lst ) // 2
# choose the middle 3 elements
three = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
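# Two illustrative checks (ours, not the module's): peak() assumes the input
# strictly increases and then strictly decreases.
assert peak([1, 2, 3, 4, 5, 4, 3, 2, 1] ) == 5
assert peak([1, 10, 9, 8, 7, 6] ) == 10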
| 218 | 1 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class __magic_name__ ( _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : Optional[int] = BarthezTokenizer
lowerCAmelCase : int = BarthezTokenizerFast
lowerCAmelCase : Dict = True
lowerCAmelCase : str = True
def __lowercase ( self : List[Any] ):
super().setUp()
_a : List[Any] = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname ,legacy_format=_UpperCAmelCase )
_a : Union[str, Any] = tokenizer
def __lowercase ( self : Tuple ):
_a : Optional[Any] = '<pad>'
_a : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) ,_UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) ,_UpperCAmelCase )
def __lowercase ( self : str ):
_a : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'<s>' )
self.assertEqual(vocab_keys[1] ,'<pad>' )
self.assertEqual(vocab_keys[-1] ,'<mask>' )
self.assertEqual(len(_UpperCAmelCase ) ,101122 )
def __lowercase ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size ,101122 )
@require_torch
def __lowercase ( self : Dict ):
_a : Any = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_a : Dict = [0, 57, 3018, 70307, 91, 2]
_a : Dict = self.tokenizer(
_UpperCAmelCase ,max_length=len(_UpperCAmelCase ) ,padding=_UpperCAmelCase ,truncation=_UpperCAmelCase ,return_tensors='pt' )
self.assertIsInstance(_UpperCAmelCase ,_UpperCAmelCase )
self.assertEqual((2, 6) ,batch.input_ids.shape )
self.assertEqual((2, 6) ,batch.attention_mask.shape )
_a : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
def __lowercase ( self : Optional[Any] ):
if not self.test_rust_tokenizer:
return
_a : str = self.get_tokenizer()
_a : List[str] = self.get_rust_tokenizer()
_a : Dict = 'I was born in 92000, and this is falsé.'
_a : List[Any] = tokenizer.tokenize(_UpperCAmelCase )
_a : Tuple = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
_a : Optional[Any] = tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase )
_a : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
_a : Union[str, Any] = self.get_rust_tokenizer()
_a : Any = tokenizer.encode(_UpperCAmelCase )
_a : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
@slow
def __lowercase ( self : Optional[int] ):
# fmt: off
_a : Optional[int] = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
_a : Optional[Any] = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase ,model_name='moussaKam/mbarthez' ,revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' ,sequences=_UpperCAmelCase ,)
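# Hedged usage sketch outside the test harness above; the checkpoint name comes
# from the test, the sample sentence is ours.
from transformers import BarthezTokenizerFast

tokenizer = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
ids = tokenizer('Le transformeur est un modèle.' ).input_ids
assert tokenizer.decode(ids , skip_special_tokens=True ).startswith('Le transformeur' )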
| 89 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : Dict , lowercase : str , lowercase : List[str]=13 , lowercase : Any=7 , lowercase : Dict=True , lowercase : str=True , lowercase : List[Any]=True , lowercase : Any=True , lowercase : Tuple=99 , lowercase : str=24 , lowercase : str=2 , lowercase : Any=6 , lowercase : Dict=37 , lowercase : List[str]="gelu" , lowercase : Dict=0.1 , lowercase : Tuple=0.1 , lowercase : Optional[Any]=512 , lowercase : List[Any]=16 , lowercase : str=2 , lowercase : int=0.02 , lowercase : List[Any]=3 , lowercase : List[Any]=None , lowercase : int=1_000 , ):
'''simple docstring'''
_snake_case = parent
_snake_case = batch_size
_snake_case = seq_length
_snake_case = is_training
_snake_case = use_input_mask
_snake_case = use_token_type_ids
_snake_case = use_labels
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = type_vocab_size
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = num_labels
_snake_case = scope
_snake_case = range_bbox
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_snake_case = bbox[i, j, 3]
_snake_case = bbox[i, j, 1]
_snake_case = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_snake_case = bbox[i, j, 2]
_snake_case = bbox[i, j, 0]
_snake_case = t
_snake_case = None
if self.use_input_mask:
_snake_case = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_snake_case = None
if self.use_token_type_ids:
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_snake_case = None
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_snake_case = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def A ( self : List[str] ):
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def A ( self : str , lowercase : Tuple , lowercase : Tuple , lowercase : str , lowercase : Any , lowercase : Union[str, Any] , lowercase : List[str] , lowercase : str , ):
'''simple docstring'''
_snake_case = LiltModel(config=lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(lowercase , bbox=lowercase , attention_mask=lowercase , token_type_ids=lowercase )
_snake_case = model(lowercase , bbox=lowercase , token_type_ids=lowercase )
_snake_case = model(lowercase , bbox=lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A ( self : List[Any] , lowercase : int , lowercase : int , lowercase : Any , lowercase : Optional[int] , lowercase : Union[str, Any] , lowercase : Optional[Any] , lowercase : Optional[int] , ):
'''simple docstring'''
_snake_case = self.num_labels
_snake_case = LiltForTokenClassification(config=lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(
lowercase , bbox=lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self : List[str] , lowercase : Union[str, Any] , lowercase : str , lowercase : Dict , lowercase : Optional[int] , lowercase : List[str] , lowercase : int , lowercase : int , ):
'''simple docstring'''
_snake_case = LiltForQuestionAnswering(config=lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(
lowercase , bbox=lowercase , attention_mask=lowercase , token_type_ids=lowercase , start_positions=lowercase , end_positions=lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self : Optional[Any] ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
bbox,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
) = config_and_inputs
inputs_dict = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
_UpperCAmelCase : List[str] = (
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Union[str, Any] = False
def A ( self : Dict , lowercase : Dict , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : List[str] , lowercase : Tuple ):
'''simple docstring'''
return True
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = LiltModelTester(self )
_snake_case = ConfigTester(self , config_class=lowercase , hidden_size=37 )
def A ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A ( self : Dict ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_snake_case = type
self.model_tester.create_and_check_model(*lowercase )
def A ( self : Any ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase )
def A ( self : Any ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase )
@slow
def A ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = LiltModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@require_torch
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(lowercase )
_snake_case = torch.tensor([[1, 2]] , device=lowercase )
_snake_case = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowercase )
# forward pass
with torch.no_grad():
_snake_case = model(input_ids=lowercase , bbox=lowercase )
_snake_case = torch.Size([1, 2, 768] )
_snake_case = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=lowercase , )
self.assertTrue(outputs.last_hidden_state.shape , lowercase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowercase , atol=1E-3 ) )
| 282 | 0 |
def heaps( arr ):
"""simple docstring"""
if len(arr ) <= 1:
return [tuple(arr )]
res = []
def generate(n , arr ):
c = [0] * n
res.append(tuple(arr ) )
i = 0
while i < n:
if c[i] < i:
if i % 2 == 0:
arr[i], arr[0] = arr[0], arr[i]
else:
arr[i], arr[c[i]] = arr[c[i]], arr[i]
res.append(tuple(arr ) )
c[i] += 1
i = 0
else:
c[i] = 0
i += 1
generate(len(arr ) , arr )
return res
if __name__ == "__main__":
user_input = input("Enter numbers separated by a comma:\n").strip()
arr = [int(item) for item in user_input.split(",")]
print(heaps(arr))
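# Quick illustrative check (ours): Heap's algorithm emits every permutation
# of the input exactly once.
perms = heaps([1, 2, 3] )
assert len(perms ) == 6
assert set(perms ) == {
    (1, 2, 3), (1, 3, 2), (2, 1, 3), (2, 3, 1), (3, 1, 2), (3, 2, 1),
}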
| 356 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
def lowercase_ ( A__ , A__ ) -> int:
"""simple docstring"""
snake_case = RobertaPreLayerNormConfig.from_pretrained(
A__ , architectures=["RobertaPreLayerNormForMaskedLM"] )
# convert state_dict
snake_case = torch.load(hf_hub_download(repo_id=A__ , filename="pytorch_model.bin" ) )
snake_case = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
if tensor_key.startswith("roberta." ):
snake_case = "roberta_prelayernorm." + tensor_key[len("roberta." ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ):
continue
snake_case = tensor_value
snake_case = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=A__ , config=A__ , state_dict=A__ )
model.save_pretrained(A__ )
# convert tokenizer
snake_case = AutoTokenizer.from_pretrained(A__ )
tokenizer.save_pretrained(A__ )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint-repo",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_A = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 137 | 0 |
from pathlib import Path
import fire
def minify( src_dir , dest_dir , n ):
src_dir = Path(src_dir )
dest_dir = Path(dest_dir )
dest_dir.mkdir(exist_ok=True )
for path in src_dir.iterdir():
new_lines = [x.rstrip() for x in list(path.open().readlines() )][:n]
dest_path = dest_dir.joinpath(path.name )
print(dest_path )
dest_path.open('''w''' ).write('''\n'''.join(new_lines ) )
if __name__ == "__main__":
fire.Fire(minify)
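# Hypothetical invocation via fire (the paths and line count are ours): keep the
# first 100 lines of every file under ./data and write the truncated copies to
# ./data_mini.
#   python minify.py ./data ./data_mini 100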
| 103 |
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def A_ ( snake_case ):
SCREAMING_SNAKE_CASE:int = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
"`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
F'''{test_file} instead.''' )
SCREAMING_SNAKE_CASE:str = components[-1]
if not test_fn.endswith("py" ):
raise ValueError(F'''`test_file` should be a python file. Got {test_fn} instead.''' )
if not test_fn.startswith("test_modeling_" ):
raise ValueError(
F'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''' )
SCREAMING_SNAKE_CASE:Dict = components[:-1] + [test_fn.replace(".py" , "" )]
SCREAMING_SNAKE_CASE:str = ".".join(snake_case )
return test_module_path
def A_ ( snake_case ):
SCREAMING_SNAKE_CASE:Any = get_module_path(snake_case )
SCREAMING_SNAKE_CASE:List[Any] = importlib.import_module(snake_case )
return test_module
def A_ ( snake_case ):
SCREAMING_SNAKE_CASE:Any = []
SCREAMING_SNAKE_CASE:List[Any] = get_test_module(snake_case )
for attr in dir(snake_case ):
if attr.endswith("ModelTester" ):
tester_classes.append(getattr(snake_case , snake_case ) )
# sort with class names
return sorted(snake_case , key=lambda snake_case : x.__name__ )
def A_ ( snake_case ):
SCREAMING_SNAKE_CASE:Any = []
SCREAMING_SNAKE_CASE:int = get_test_module(snake_case )
for attr in dir(snake_case ):
SCREAMING_SNAKE_CASE:Optional[Any] = getattr(snake_case , snake_case )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
SCREAMING_SNAKE_CASE:Union[str, Any] = getattr(snake_case , "all_model_classes" , [] )
if len(snake_case ) > 0:
test_classes.append(snake_case )
# sort with class names
return sorted(snake_case , key=lambda snake_case : x.__name__ )
def A_ ( snake_case ):
SCREAMING_SNAKE_CASE:Any = get_test_classes(snake_case )
SCREAMING_SNAKE_CASE:List[str] = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(snake_case , key=lambda snake_case : x.__name__ )
def A_ ( snake_case ):
SCREAMING_SNAKE_CASE:List[Any] = test_class()
if hasattr(snake_case , "setUp" ):
test.setUp()
SCREAMING_SNAKE_CASE:str = None
if hasattr(snake_case , "model_tester" ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
SCREAMING_SNAKE_CASE:Tuple = test.model_tester.__class__
return model_tester
def A_ ( snake_case , snake_case ):
SCREAMING_SNAKE_CASE:Union[str, Any] = get_test_classes(snake_case )
SCREAMING_SNAKE_CASE:Union[str, Any] = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(snake_case )
# sort with class names
return sorted(snake_case , key=lambda snake_case : x.__name__ )
def A_ ( snake_case , snake_case ):
SCREAMING_SNAKE_CASE:str = get_test_classes_for_model(snake_case , snake_case )
SCREAMING_SNAKE_CASE:Dict = []
for test_class in test_classes:
SCREAMING_SNAKE_CASE:Dict = get_model_tester_from_test_class(snake_case )
if tester_class is not None:
tester_classes.append(snake_case )
# sort with class names
return sorted(snake_case , key=lambda snake_case : x.__name__ )
def A_ ( snake_case ):
SCREAMING_SNAKE_CASE:str = get_test_classes(snake_case )
SCREAMING_SNAKE_CASE:Dict = {test_class: get_model_tester_from_test_class(snake_case ) for test_class in test_classes}
return test_tester_mapping
def A_ ( snake_case ):
SCREAMING_SNAKE_CASE:Union[str, Any] = get_model_classes(snake_case )
SCREAMING_SNAKE_CASE:Optional[int] = {
model_class: get_test_classes_for_model(snake_case , snake_case ) for model_class in model_classes
}
return model_test_mapping
def A_ ( snake_case ):
SCREAMING_SNAKE_CASE:Union[str, Any] = get_model_classes(snake_case )
SCREAMING_SNAKE_CASE:Tuple = {
model_class: get_tester_classes_for_model(snake_case , snake_case ) for model_class in model_classes
}
return model_to_tester_mapping
def A_ ( snake_case ):
if isinstance(snake_case , snake_case ):
return o
elif isinstance(snake_case , snake_case ):
return o.__name__
elif isinstance(snake_case , (list, tuple) ):
return [to_json(snake_case ) for x in o]
elif isinstance(snake_case , snake_case ):
return {to_json(snake_case ): to_json(snake_case ) for k, v in o.items()}
else:
return o
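# Hedged usage sketch; the test-file path is hypothetical but must live under
# tests/models/ to be accepted, and the helper name matches the calls the
# functions above make into each other (the upstream module exposes it as
# get_test_classes).
if __name__ == "__main__":
    test_classes = get_test_classes('tests/models/bert/test_modeling_bert.py' )
    print([cls.__name__ for cls in test_classes] )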
| 139 | 0 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
__A : List[Any] = logging.get_logger(__name__)
def list_field( default=None , metadata=None ) ->Dict:
"""simple docstring"""
return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class _SCREAMING_SNAKE_CASE :
_UpperCamelCase:List[str] = list_field(
default=[] , metadata={
"help": (
"Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
" of all available models"
)
} , )
_UpperCamelCase:List[int] = list_field(
default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"})
_UpperCamelCase:List[int] = list_field(
default=[8, 32, 1_28, 5_12] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , )
_UpperCamelCase:bool = field(
default=lowerCAmelCase__ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , )
_UpperCamelCase:bool = field(
default=lowerCAmelCase__ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , )
_UpperCamelCase:bool = field(
default=lowerCAmelCase__ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."})
_UpperCamelCase:bool = field(default=lowerCAmelCase__ , metadata={"help": "Use FP16 to accelerate inference."})
_UpperCamelCase:bool = field(default=lowerCAmelCase__ , metadata={"help": "Benchmark training of model"})
_UpperCamelCase:bool = field(default=lowerCAmelCase__ , metadata={"help": "Verbose memory tracing"})
_UpperCamelCase:bool = field(
default=lowerCAmelCase__ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , )
_UpperCamelCase:bool = field(
default=lowerCAmelCase__ , metadata={
"help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
} , )
_UpperCamelCase:bool = field(default=lowerCAmelCase__ , metadata={"help": "Trace memory line by line"})
_UpperCamelCase:bool = field(default=lowerCAmelCase__ , metadata={"help": "Save result to a CSV file"})
_UpperCamelCase:bool = field(default=lowerCAmelCase__ , metadata={"help": "Save all print statements in a log file"})
_UpperCamelCase:bool = field(default=lowerCAmelCase__ , metadata={"help": "Whether to print environment information"})
_UpperCamelCase:bool = field(
default=lowerCAmelCase__ , metadata={
"help": (
"Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
" for debugging / testing and on TPU."
)
} , )
_UpperCamelCase:str = field(
default=F"inference_time_{round(time())}.csv" , metadata={"help": "CSV filename used if saving time results to csv."} , )
_UpperCamelCase:str = field(
default=F"inference_memory_{round(time())}.csv" , metadata={"help": "CSV filename used if saving memory results to csv."} , )
_UpperCamelCase:str = field(
default=F"train_time_{round(time())}.csv" , metadata={"help": "CSV filename used if saving time results to csv for training."} , )
_UpperCamelCase:str = field(
default=F"train_memory_{round(time())}.csv" , metadata={"help": "CSV filename used if saving memory results to csv for training."} , )
_UpperCamelCase:str = field(
default=F"env_info_{round(time())}.csv" , metadata={"help": "CSV filename used if saving environment information."} , )
_UpperCamelCase:str = field(
default=F"log_{round(time())}.csv" , metadata={"help": "Log filename used if print statements are saved in log."} , )
_UpperCamelCase:int = field(default=3 , metadata={"help": "Times an experiment will be run."})
_UpperCamelCase:bool = field(
default=lowerCAmelCase__ , metadata={
"help": (
"Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
" model weights."
)
} , )
def _snake_case ( self )-> Union[str, Any]:
warnings.warn(
f'The class {self.__class__} is deprecated. Hugging Face Benchmarking utils'
""" are deprecated in general and it is advised to use external Benchmarking libraries """
""" to benchmark Transformer models.""" , _SCREAMING_SNAKE_CASE , )
def _snake_case ( self )-> int:
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def _snake_case ( self )-> List[str]:
if len(self.models ) <= 0:
raise ValueError(
"""Please make sure you provide at least one model name / model identifier, *e.g.* `--models"""
""" bert-base-cased` or `args.models = ['bert-base-cased'].""" )
return self.models
@property
def _snake_case ( self )-> Union[str, Any]:
if not self.multi_process:
return False
elif self.is_tpu:
logger.info("""Multiprocessing is currently not possible on TPU.""" )
return False
else:
return True
| 354 |
from __future__ import annotations
class Node:
def __init__( self , data )-> None:
self.data = data
self.left: Node | None = None
self.right: Node | None = None
def display( tree: Node | None ) ->None: # In Order traversal of the tree
"""simple docstring"""
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def depth_of_tree( tree: Node | None ) ->int:
"""simple docstring"""
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def is_full_binary_tree( tree: Node ) ->bool:
"""simple docstring"""
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def main() ->None: # Main function for testing.
"""simple docstring"""
tree = Node(1 )
tree.left = Node(2 )
tree.right = Node(3 )
tree.left.left = Node(4 )
tree.left.right = Node(5 )
tree.left.right.left = Node(6 )
tree.right.left = Node(7 )
tree.right.left.left = Node(8 )
tree.left.right.right = Node(9 )
print(is_full_binary_tree(tree ) )
print(depth_of_tree(tree ) )
print("""Tree is: """ )
display(tree )
if __name__ == "__main__":
main()
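# A tiny illustrative case of ours beyond main(): a root with two leaf children
# is a full binary tree of depth 2.
small_tree = Node(1 )
small_tree.left = Node(2 )
small_tree.right = Node(3 )
assert is_full_binary_tree(small_tree )
assert depth_of_tree(small_tree ) == 2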
| 49 | 0 |
"""simple docstring"""
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict = {}
def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
_lowerCamelCase : Optional[Any] = sum(a_i[j] for j in range(lowercase__ , len(lowercase__ ) ) )
_lowerCamelCase : Dict = sum(a_i[j] * base[j] for j in range(min(len(lowercase__ ) , lowercase__ ) ) )
_lowerCamelCase, _lowerCamelCase : Dict = 0, 0
_lowerCamelCase : List[str] = n - i
_lowerCamelCase : Optional[int] = memo.get(lowercase__ )
if sub_memo is not None:
_lowerCamelCase : Tuple = sub_memo.get(lowercase__ )
if jumps is not None and len(lowercase__ ) > 0:
# find and make the largest jump without going over
_lowerCamelCase : Optional[Any] = -1
for _k in range(len(lowercase__ ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
_lowerCamelCase : Dict = _k
break
if max_jump >= 0:
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Dict = jumps[max_jump]
# since the difference between jumps is cached, add c
_lowerCamelCase : Any = diff + c
for j in range(min(lowercase__ , len(lowercase__ ) ) ):
_lowerCamelCase, _lowerCamelCase : Tuple = divmod(lowercase__ , 10 )
if new_c > 0:
add(lowercase__ , lowercase__ , lowercase__ )
else:
_lowerCamelCase : List[Any] = []
else:
_lowerCamelCase : List[str] = {c: []}
_lowerCamelCase : str = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
_lowerCamelCase, _lowerCamelCase : Any = next_term(lowercase__ , k - 1 , i + dn , lowercase__ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
_lowerCamelCase, _lowerCamelCase : str = compute(lowercase__ , lowercase__ , i + dn , lowercase__ )
diff += _diff
dn += terms_jumped
_lowerCamelCase : Union[str, Any] = sub_memo[c]
# keep jumps sorted by # of terms skipped
_lowerCamelCase : Dict = 0
while j < len(lowercase__ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(lowercase__ , (diff, dn, k) )
return (diff, dn)
def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
if i >= n:
return 0, i
if k > len(lowercase__ ):
a_i.extend([0 for _ in range(k - len(lowercase__ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
_lowerCamelCase : int = i
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = 0, 0, 0
for j in range(len(lowercase__ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
_lowerCamelCase : int = ds_c + ds_b
diff += addend
_lowerCamelCase : Optional[int] = 0
for j in range(lowercase__ ):
_lowerCamelCase : Dict = a_i[j] + addend
_lowerCamelCase, _lowerCamelCase : List[Any] = divmod(lowercase__ , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(lowercase__ , lowercase__ , lowercase__ )
return diff, i - start_i
def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
for j in range(lowercase__ , len(lowercase__ ) ):
_lowerCamelCase : int = digits[j] + addend
if s >= 10:
_lowerCamelCase, _lowerCamelCase : Dict = divmod(lowercase__ , 10 )
_lowerCamelCase : Union[str, Any] = addend // 10 + quotient
else:
_lowerCamelCase : Dict = s
_lowerCamelCase : Optional[Any] = addend // 10
if addend == 0:
break
while addend > 0:
_lowerCamelCase, _lowerCamelCase : Dict = divmod(lowercase__ , 10 )
digits.append(lowercase__ )
def _snake_case ( lowercase__ = 10**15 ):
_lowerCamelCase : Optional[Any] = [1]
_lowerCamelCase : int = 1
_lowerCamelCase : Any = 0
while True:
_lowerCamelCase, _lowerCamelCase : Dict = next_term(lowercase__ , 20 , i + dn , lowercase__ )
dn += terms_jumped
if dn == n - i:
break
_lowerCamelCase : int = 0
for j in range(len(lowercase__ ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(F"{solution() = }") | 96 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
'''simple docstring'''
def __init__( self , data ):
self.data = data
self.next: Node | None = None
class CircularLinkedList:
'''simple docstring'''
def __init__( self ):
self.head: Node | None = None
self.tail: Node | None = None
def __iter__( self ):
node = self.head
while self.head:
yield node.data
node = node.next
if node == self.head:
break
def __len__( self ):
return sum(1 for _ in self )
def __repr__( self ):
return "->".join(str(item ) for item in iter(self ) )
def insert_tail( self , data ):
self.insert_nth(len(self ) , data )
def insert_head( self , data ):
self.insert_nth(0 , data )
def insert_nth( self , index , data ):
if index < 0 or index > len(self ):
raise IndexError('list index out of range.' )
new_node = Node(data )
if self.head is None:
new_node.next = new_node # first node points itself
self.tail = self.head = new_node
elif index == 0: # insert at head
new_node.next = self.head
self.head = self.tail.next = new_node
else:
temp = self.head
for _ in range(index - 1 ):
temp = temp.next
new_node.next = temp.next
temp.next = new_node
if index == len(self ) - 1: # insert at tail
self.tail = new_node
def delete_front( self ):
return self.delete_nth(0 )
def delete_tail( self ):
return self.delete_nth(len(self ) - 1 )
def delete_nth( self , index = 0 ):
if not 0 <= index < len(self ):
raise IndexError('list index out of range.' )
delete_node = self.head
if self.head == self.tail: # just one node
self.head = self.tail = None
elif index == 0: # delete head node
self.tail.next = self.tail.next.next
self.head = self.head.next
else:
temp = self.head
for _ in range(index - 1 ):
temp = temp.next
delete_node = temp.next
temp.next = temp.next.next
if index == len(self ) - 1: # delete at tail
self.tail = temp
return delete_node.data
def is_empty( self ):
return len(self ) == 0
def test_circular_linked_list( ):
circular_linked_list = CircularLinkedList()
assert len(circular_linked_list ) == 0
assert circular_linked_list.is_empty() is True
assert str(circular_linked_list ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(circular_linked_list ) == i
circular_linked_list.insert_nth(i , i + 1 )
assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
circular_linked_list.insert_tail(6 )
assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 7 ) )
circular_linked_list.insert_head(0 )
assert str(circular_linked_list ) == "->".join(str(i ) for i in range(0 , 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(lowercase__ ) == "->".join(str(lowercase__ ) for i in range(1 , 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 , 3 )
assert str(lowercase__ ) == "->".join(str(lowercase__ ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 96 | 1 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
_A = logging.get_logger(__name__)
_A = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
_A = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
_A = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image Classification mapping
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
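# Hedged usage sketch (the checkpoint name below is illustrative, not from this file):
# each auto class dispatches on the config type of the loaded checkpoint, e.g.
#
#   from transformers import FlaxAutoModelForMaskedLM
#   model = FlaxAutoModelForMaskedLM.from_pretrained("bert-base-cased")
#
# would resolve to FlaxBertForMaskedLM via FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES above.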
| 350 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_vit': ['VIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTConfig', 'ViTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_vit'] = ['ViTFeatureExtractor']
    _import_structure['image_processing_vit'] = ['ViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit'] = [
'VIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTForImageClassification',
'ViTForMaskedImageModeling',
'ViTModel',
'ViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vit'] = [
'TFViTForImageClassification',
'TFViTModel',
'TFViTPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_vit'] = [
'FlaxViTForImageClassification',
'FlaxViTModel',
'FlaxViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
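# Behavior sketch (standard _LazyModule semantics, no extra logic in this file):
# `import transformers.models.vit` is cheap; a submodule such as `modeling_vit` is only
# imported the first time an attribute like `ViTModel` is accessed on the package.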
| 117 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'tester'
        output_text = 'tester'
        return input_text, output_text

    @unittest.skip('MGP-STR always lower cases letters.')
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                special_token = '[SPECIAL_TOKEN]'

                tokenizer.add_special_tokens({'cls_token': special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(' ', ''), output_text)

    @unittest.skip('MGP-STR tokenizer only handles one sequence.')
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer')
    def test_pretokenized_inputs(self):
        pass
| 263 |
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def gamma_iterative(num: float) -> float:
    """Approximate Gamma(num) by numerically integrating x^(num-1) * e^(-x) over [0, inf)."""
    if num <= 0:
        raise ValueError('math domain error')

    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
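    # Quick sanity check (a known identity, not from the original file): Gamma(5) = 4! = 24.
    print(f"gamma_iterative(5) = {gamma_iterative(5):.4f}")  # expected ~24.0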
| 263 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
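# Hedged usage sketch (assumes torch, datasets, and the checkpoints above are reachable):
# tool = TextToSpeechTool()
# tool.setup()
# waveform = tool("Hello, world!")  # Tool.__call__ chains encode -> forward -> decode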
| 63 | from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """Drop references to the given objects and empty the device cache; returns the emptied list."""
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    """Heuristic: does this exception look like an out-of-memory failure?"""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    """Decorator that retries `function`, halving its first argument (the batch size) on OOM."""
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator | 243 |
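# Hedged usage sketch (the training-loop body is illustrative, not from the source):
# @find_executable_batch_size(starting_batch_size=64)
# def train(batch_size):
#     ...  # build dataloaders with `batch_size` and run one training pass
# train()  # retries with 64, 32, 16, ... until no OOM-like error is raised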
"""simple docstring"""
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_2_8,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 5_0,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 1_0,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 1_0,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")
    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )
    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )
    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)
    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
| 272 |
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 272 | 1 |
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
| 68 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices

        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual

        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)

        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()

        output["model_type"] = self.__class__.model_type
        return output
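# Minimal instantiation sketch (behavior follows the branches above, values are the defaults):
# config = DPTConfig(is_hybrid=True)  # auto-creates a default BiT backbone config
# assert config.readout_type == "project"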
| 155 | 0 |
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer
def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Instantiate an untrained model from `config_name` and save it (plus tokenizer) to `save_dir`."""
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
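# Example CLI invocation via python-fire (config name, output dir, and extra kwargs are illustrative):
#   python save_randomly_initialized_model.py t5-small /tmp/t5-small-rand --d_model=64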
| 364 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )
    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass
    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)
    @require_torch
    def test_small_model_pt(self):
        # This is highly irregular to have no small tests.
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
| 59 | 0 |
def longest_common_substring(text1: str, text2: str) -> str:
    """Return the longest contiguous substring shared by `text1` and `text2` (DP, O(n*m))."""
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    text1_length = len(text1)
    text2_length = len(text2)

    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
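    # Illustrative check (inputs chosen for this note, not from the source):
    # the longest run shared by "abcdef" and "xabcy" is "abc".
    assert longest_common_substring("abcdef", "xabcy") == "abc"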
| 154 |
def climb_stairs(number_of_steps: int) -> int:
    """Count the distinct ways to climb `number_of_steps` stairs taking 1 or 2 steps at a time."""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
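    # Illustrative check (values chosen for this note, not from the source):
    assert climb_stairs(3) == 3  # 1+1+1, 1+2, 2+1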
| 154 | 1 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_pretraining(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        '''simple docstring'''
        np.random.seed(2)
        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise
        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)
    def test_save_load(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            out_before = outputs[0].cpu().numpy()
            out_before[np.isnan(out_before)] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                # Make sure we don't have nans
                out_after = after_outputs[0].cpu().numpy()
                out_after[np.isnan(out_after)] = 0
                max_diff = np.amax(np.abs(out_before - out_after))
                self.assertLessEqual(max_diff, 1E-5)
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
@slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor(self):
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained('facebook/vit-mae-base') if is_vision_available() else None
@slow
    def test_inference_for_pretraining(self):
        '''simple docstring'''
        np.random.seed(2)
        model = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))
        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]])
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1E-4))
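# --- Illustrative sketch (not part of the test file): the shape arithmetic behind the
# expected torch.Size((1, 196, 768)) above, assuming the facebook/vit-mae-base
# defaults of image_size=224, patch_size=16 and num_channels=3:
num_patches = (224 // 16) ** 2  # 14 x 14 = 196 patches per image
pixels_per_patch = 16**2 * 3  # each patch is reconstructed as 768 pixel values
assert (num_patches, pixels_per_patch) == (196, 768)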
| 356 | '''simple docstring'''
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    '''simple docstring'''
    order = 1
@register_to_config
    def __init__(self, num_train_timesteps=1000, trained_betas=None):
        '''simple docstring'''
        self.set_timesteps(num_train_timesteps)
# standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
# running values
        self.ets = []
    def set_timesteps(self, num_inference_steps, device=None):
        '''simple docstring'''
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)
        self.ets = []
    def step(self, model_output, timestep, sample, return_dict=True):
        '''simple docstring'''
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler")
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def scale_model_input(self, sample, *args, **kwargs):
        '''simple docstring'''
        return sample
    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        '''simple docstring'''
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        pred = (sample - sigma * ets) / max(alpha, 1E-8)
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample
def __len__( self ):
'''simple docstring'''
return self.config.num_train_timesteps
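# --- Illustrative sketch (not part of the scheduler): `step` above combines the
# running `ets` buffer with classic Adams-Bashforth linear-multistep coefficients.
# Each coefficient set sums to 1, so the update stays a consistent extrapolation
# of the model output:
ab_coefficients = {
    1: [1.0],
    2: [3 / 2, -1 / 2],
    3: [23 / 12, -16 / 12, 5 / 12],
    4: [55 / 24, -59 / 24, 37 / 24, -9 / 24],
}
for order, coeffs in ab_coefficients.items():
    assert abs(sum(coeffs) - 1.0) < 1e-12, order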
| 274 | 0 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    """simple docstring"""
    arr = [randint(-1000, 1000) for _ in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)
dataset = make_dataset()
def triplet_sum1(arr: list[int], target: int) -> tuple[int, int, int]:
    """simple docstring"""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)
def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """simple docstring"""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
def solution_times() -> tuple[float, float]:
    """simple docstring"""
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))
if __name__ == "__main__":
from doctest import testmod
testmod()
times = solution_times()
print(F'''The time for naive implementation is {times[0]}.''')
print(F'''The time for optimized implementation is {times[1]}.''')
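# --- Illustrative sketch (not part of the benchmark): the two-pointer scan in
# triplet_sum2 on a tiny hand-made input; the values below are made up for the demo.
demo = [1, 2, 4, 7, 11]
assert triplet_sum2(demo, 14) == (1, 2, 11)  # 1 + 2 + 11 == 14
assert triplet_sum2(demo, 100) == (0, 0, 0)  # sentinel when no triplet matches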
| 169 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
"""simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"])
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase__ = tokenized_datasets.rename_column("label" , "labels" )
    def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCAmelCase__ = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCAmelCase__ = 16
elif accelerator.mixed_precision != "no":
UpperCAmelCase__ = 8
else:
UpperCAmelCase__ = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt")
# Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowerCAmelCase : int = mocked_dataloaders # noqa: F811
def training_function(config, args):
"""simple docstring"""
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir)
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references)
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch)
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def main():
"""simple docstring"""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.")
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.")
    parser.add_argument(
        "--project_dir", type=str, default="logs", help="Location on where to store experiment tracking logs` and relevent project information")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
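# --- Usage sketch (assumption: the script is saved as tracking.py; the flags below
# are the standard `accelerate` CLI options, and `accelerate config` should be run
# once beforehand to describe the machine):
#
#   accelerate launch tracking.py                    # train without logging
#   accelerate launch tracking.py --with_tracking    # log to all trackers in the env
#   accelerate launch tracking.py --mixed_precision fp16 --with_tracking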
| 169 | 1 |
"""simple docstring"""
def binary_insertion_sort(collection: list) -> list:
    '''simple docstring'''
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
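# --- Illustrative sketch (not part of the script): the sort works in place and also
# returns the list, so both assertions below hold. Input values are made up.
sample = [5, 2, 9, 1, 5]
assert binary_insertion_sort(sample) == [1, 2, 5, 5, 9]
assert sample == [1, 2, 5, 5, 9]  # the input list itself was mutated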
| 32 | """simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"""The `image_to_image.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionImg2ImgPipeline` instead."""
)
| 32 | 1 |
"""simple docstring"""
def print_pascal_triangle(num_rows: int) -> None:
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row
def calculate_current_element(triangle: list[list[int]], current_row: list[int], current_row_idx: int, current_col_idx: int) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
return result
def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")
    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
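# --- Illustrative sketch (not part of the script): the optimized generator computes
# only the first ceil(row_length / 2) entries of each row and mirrors them, because
# every row of Pascal's triangle is a palindrome:
row_5 = [1, 4, 6, 4, 1]  # row index 4, length 5
distinct = sum(divmod(len(row_5), 2))  # ceil(5 / 2) == 3
assert row_5[:distinct] == [1, 4, 6]
assert row_5 == row_5[::-1]  # mirror symmetry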
| 16 |
def nor_gate(input_1: int, input_2: int) -> int:
    """simple docstring"""
    return int(input_1 == input_2 == 0)
def main() -> None:
"""simple docstring"""
print('''Truth Table of NOR Gate:''' )
print('''| Input 1 | Input 2 | Output |''' )
print(f"""| 0 | 0 | {nor_gate(0 , 0 )} |""" )
print(f"""| 0 | 1 | {nor_gate(0 , 1 )} |""" )
print(f"""| 1 | 0 | {nor_gate(1 , 0 )} |""" )
print(f"""| 1 | 1 | {nor_gate(1 , 1 )} |""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
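# --- Illustrative sketch (not part of the script): NOR is NOT(a OR b), so the
# "both inputs are 0" implementation above agrees with De Morgan's form:
for a in (0, 1):
    for b in (0, 1):
        assert nor_gate(a, b) == int(not (a or b))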
| 14 | 0 |
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, 'src', 'diffusers')
class __magic_name__ ( unittest.TestCase ):
def lowerCAmelCase ( self) -> List[str]:
'''simple docstring'''
        simple_backend = find_backend(' if not is_torch_available():')
        self.assertEqual(simple_backend, 'torch')
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend(' if not (is_torch_available() and is_transformers_available()):')
        self.assertEqual(double_backend, 'torch_and_transformers')
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            ' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):')
        self.assertEqual(triple_backend, 'torch_and_transformers_and_onnx')
def lowerCAmelCase ( self) -> str:
'''simple docstring'''
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('torch', objects)
        self.assertIn('torch_and_transformers', objects)
        self.assertIn('flax_and_transformers', objects)
        self.assertIn('torch_and_transformers_and_onnx', objects)
# Likewise, we can't assert on the exact content of a key
self.assertIn('UNet2DModel' , objects['torch'])
self.assertIn('FlaxUNet2DConditionModel' , objects['flax'])
self.assertIn('StableDiffusionPipeline' , objects['torch_and_transformers'])
self.assertIn('FlaxStableDiffusionPipeline' , objects['flax_and_transformers'])
self.assertIn('LMSDiscreteScheduler' , objects['torch_and_scipy'])
self.assertIn('OnnxStableDiffusionPipeline' , objects['torch_and_transformers_and_onnx'])
def lowerCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] =create_dummy_object('CONSTANT' , '\'torch\'')
self.assertEqual(__a , '\nCONSTANT = None\n')
_UpperCAmelCase : Optional[Any] =create_dummy_object('function' , '\'torch\'')
self.assertEqual(
__a , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n')
_UpperCAmelCase : List[str] ='\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n'
_UpperCAmelCase : List[Any] =create_dummy_object('FakeClass' , '\'torch\'')
self.assertEqual(__a , __a)
def lowerCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : List[Any] ='# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n'
_UpperCAmelCase : Union[str, Any] =create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']})
self.assertEqual(dummy_files['torch'] , __a)
| 362 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase =logging.get_logger(__name__)
lowercase ={
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig):
    model_type = "glpn"
    def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1e-6, decoder_hidden_size=64, max_depth=10, head_in_index=-1, **kwargs) -> None:
        '''simple docstring'''
        super().__init__(**kwargs)
_UpperCAmelCase : Any =num_channels
_UpperCAmelCase : List[str] =num_encoder_blocks
_UpperCAmelCase : Optional[Any] =depths
_UpperCAmelCase : str =sr_ratios
_UpperCAmelCase : Dict =hidden_sizes
_UpperCAmelCase : List[str] =patch_sizes
_UpperCAmelCase : Any =strides
_UpperCAmelCase : List[str] =mlp_ratios
_UpperCAmelCase : Dict =num_attention_heads
_UpperCAmelCase : List[str] =hidden_act
_UpperCAmelCase : int =hidden_dropout_prob
_UpperCAmelCase : List[Any] =attention_probs_dropout_prob
_UpperCAmelCase : Union[str, Any] =initializer_range
_UpperCAmelCase : Tuple =drop_path_rate
_UpperCAmelCase : str =layer_norm_eps
_UpperCAmelCase : Optional[int] =decoder_hidden_size
_UpperCAmelCase : List[str] =max_depth
_UpperCAmelCase : Dict =head_in_index
| 242 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
'processing_mgp_str': ['MgpstrProcessor'],
'tokenization_mgp_str': ['MgpstrTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
'MgpstrModel',
'MgpstrPreTrainedModel',
'MgpstrForSceneTextRecognition',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 147 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json',
},
'added_tokens.json': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json',
},
'merges_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt',
},
'tokenizer_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'RUCAIBox/mvp': 1_024,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = MvpTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
@property
    def mask_token(self) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
    def mask_token(self, value) -> None:
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                "to use it with pretokenized inputs.")
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                "to use it with pretokenized inputs.")
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 147 | 1 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
lowercase_ = object()
# For specifying empty leaf dict `{}`
lowercase_ = object()
def _match(qs, ks):
    qts = tuple(re.compile(x + '$') for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val
    return replace
def _get_partition_rules():
return [
# embeddings
(("transformer", "wpe", "embedding"), P('''mp''' , lowerCAmelCase__ )),
(("transformer", "wte", "embedding"), P('''mp''' , lowerCAmelCase__ )),
# atention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(lowerCAmelCase__ , '''mp''' )),
(("attention", "out_proj", "kernel"), P('''mp''' , lowerCAmelCase__ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(lowerCAmelCase__ , '''mp''' )),
(("mlp", "c_fc", "bias"), P('''mp''' )),
(("mlp", "c_proj", "kernel"), P('''mp''' , lowerCAmelCase__ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
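# --- Illustrative sketch (not part of the module): _match slides the compiled rule
# tuple over every window of the key path, so a rule can anchor anywhere in the
# path. The key path below is made up for the demo.
demo_key = ("transformer", "h", "0", "mlp", "c_fc", "kernel")
assert _match(("mlp", "c_fc", "kernel"), demo_key)
assert not _match(("mlp", "c_proj", "kernel"), demo_key)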
| 11 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 11 | 1 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    in_colab = False
    in_kaggle = False
    if any(key.startswith('KAGGLE') for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = 'google.colab' in str(sys.modules['IPython'].get_ipython())
    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.")
    if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME', None) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '''
'''your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
        if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function, distributed_type='TPU')
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method='fork')
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on one CPU.''' )
        function(*args)
else:
if num_processes is None:
raise ValueError(
'''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
'''inside your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if torch.cuda.is_initialized():
raise ValueError(
'''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
'''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
'''function.''' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
                world_size=num_processes, master_addr='127.0.0.1', master_port=use_port, mixed_precision=mixed_precision):
                launcher = PrepareForLaunch(function, distributed_type='MULTI_GPU')
print(f"Launching training on {num_processes} GPUs." )
try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method='fork')
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
'''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
'''Please review your imports and test them when running the `notebook_launcher()` to identify '''
'''which one is problematic.''' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
            os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
print('''Launching training on MPS.''' )
elif torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on CPU.''' )
        function(*args)
def debug_launcher(function, args=(), num_processes=2):
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
            world_size=num_processes, master_addr='127.0.0.1', master_port='29500', accelerate_mixed_precision='no', accelerate_debug_rdv_file=tmp_file.name, accelerate_use_cpu='yes', ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method='fork')
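# --- Usage sketch (assumption: `training_loop` is a user-defined function that
# builds its Accelerator and model inside itself, which is the documented calling
# convention for notebook_launcher; this example is not part of the module):
#
#   def training_loop(mixed_precision="fp16"):
#       ...  # create Accelerator, model, dataloaders here, not in the notebook body
#
#   notebook_launcher(training_loop, args=("fp16",), num_processes=2)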
| 278 |
from functools import lru_cache
@lru_cache
def __UpperCamelCase ( _A ):
if num < 0:
raise ValueError('''Number should not be negative.''' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 278 | 1 |
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
__magic_name__ = {
"E": 12.70,
"T": 9.06,
"A": 8.17,
"O": 7.51,
"I": 6.97,
"N": 6.75,
"S": 6.33,
"H": 6.09,
"R": 5.99,
"D": 4.25,
"L": 4.03,
"C": 2.78,
"U": 2.76,
"M": 2.41,
"W": 2.36,
"F": 2.23,
"G": 2.02,
"Y": 1.97,
"P": 1.93,
"B": 1.29,
"V": 0.98,
"K": 0.77,
"J": 0.15,
"X": 0.15,
"Q": 0.10,
"Z": 0.07,
}
__magic_name__ = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
__magic_name__ = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def get_letter_count(message: str) -> dict[str, int]:
    '''simple docstring'''
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero(x: tuple) -> str:
    '''simple docstring'''
    return x[0]
def get_frequency_order(message: str) -> str:
    '''simple docstring'''
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = ''.join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return ''.join(freq_order)
def english_freq_match_score(message: str) -> int:
    '''simple docstring'''
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
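# --- Illustrative sketch (not part of the script): on a degenerate message of one
# repeated letter, that letter must lead the frequency order, and the match score
# is bounded by 12 (six common plus six uncommon letters are checked):
assert get_frequency_order("eeeee")[0] == "E"
assert 0 <= english_freq_match_score("eeeee") <= 12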
| 152 |
# Function to print upper half of diamond (pyramid)
def _lowerCAmelCase ( A__: str ):
'''simple docstring'''
for i in range(0 , A__ ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(''' ''' , end='''''' )
for _ in range(0 , i + 1 ): # printing stars
print('''* ''' , end='''''' )
print()
def _lowerCAmelCase ( A__: Optional[int] ):
'''simple docstring'''
for i in range(A__ , 0 , -1 ):
for _ in range(A__ , 0 , -1 ): # printing stars
print('''* ''' , end='''''' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(''' ''' , end='''''' )
def _lowerCAmelCase ( A__: str ):
'''simple docstring'''
if n <= 0:
print(''' ... .... nothing printing :(''' )
return
floyd(A__ ) # upper half
reverse_floyd(A__ ) # lower half
if __name__ == "__main__":
print(r"| /\ | |- | |- |--| |\ /| |-")
print(r"|/ \| |- |_ |_ |__| | \/ | |_")
__magic_name__ = 1
while K:
__magic_name__ = int(input("enter the number and , and see the magic : "))
print()
pretty_print(user_number)
__magic_name__ = int(input("press 0 to exit... and 1 to continue..."))
print("Good Bye...")
| 152 | 1 |
def is_palindrome(num: int) -> bool:
    return str(num) == str(num)[::-1]
def sum_reverse(num: int) -> int:
    return int(num) + int(str(num)[::-1])
def solution(limit: int = 10000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
print(F'''{solution() = }''')
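# --- Illustrative sketch (not part of the script): 349 reaches a palindrome after
# three reverse-and-add iterations, a classic Project Euler 55 example:
a = 349
a = sum_reverse(a)  # 349 + 943 = 1292
a = sum_reverse(a)  # 1292 + 2921 = 4213
a = sum_reverse(a)  # 4213 + 3124 = 7337
assert is_palindrome(a)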
| 327 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]
    return out_tensor.tolist()
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith('P'):
        return True
    return False
@dataclass
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : PreTrainedTokenizerBase
UpperCamelCase__ : Union[bool, str, PaddingStrategy] = True
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : int = -100
UpperCamelCase__ : str = "pt"
    def torch_call(self, features):
        '''simple docstring'''
        import torch
        label_name = 'label' if 'label' in features[0].keys() else 'labels'
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='pt' if labels is None else None)
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch['entity_ids']).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]
        ner_tags = [feature['ner_tags'] for feature in features]
        batch['ner_tags'] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature['original_entity_spans'] for feature in features]
        batch['original_entity_spans'] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
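# --- Illustrative sketch (not part of the module): right-padding ragged label
# sequences with the padding_tensor helper above; values are made up for the demo.
padded = padding_tensor([[1, 2], [3]], -1, "right", 4)
assert padded == [[1, 2, -1, -1], [3, -1, -1, -1]]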
| 49 | 0 |
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class __A ( unittest.TestCase ):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)
    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")
    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")
    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")
    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")
    def test_unequal_list_length(self):
        self.assertRaisesRegex(
            IndexError, "The length of profit and weight must be same.")
if __name__ == "__main__":
unittest.main()
| 351 | """simple docstring"""
import socket
def main() -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312
sock.connect((host, port) )
sock.send(b"""Hello server!""" )
with open("""Received_file""" ,"""wb""" ) as out_file:
print("""File opened""" )
print("""Receiving data...""" )
while True:
            data = sock.recv(1024)
if not data:
break
            out_file.write(data)
print("""Successfully received the file""" )
sock.close()
print("""Connection closed""" )
if __name__ == "__main__":
main()
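# --- Server-side sketch (assumptions: the filename and port below are made up to
# mirror the client above; this counterpart is not part of the original script):
def serve_file(filename: str = "mytext.txt", port: int = 12312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)  # accept a single client
    conn, _ = server.accept()
    print(conn.recv(1024))  # the client's greeting
    with open(filename, "rb") as in_file:
        data = in_file.read(1024)
        while data:
            conn.send(data)  # stream the file in 1 KiB chunks
            data = in_file.read(1024)
    conn.close()
    server.close()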
| 126 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
"""simple docstring"""
    task: str = field(default='audio-classification', metadata={'include_in_asdict_even_if_is_default': True})
    input_schema: ClassVar[Features] = Features({'audio': Audio()})
    label_schema: ClassVar[Features] = Features({'labels': ClassLabel})
    audio_column: str = 'audio'
    label_column: str = 'labels'
    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.')
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.')
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema['labels'] = features[self.label_column]
        task_template.__dict__['label_schema'] = label_schema
        return task_template
@property
    def column_mapping(self) -> Dict[str, str]:
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 63 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class A__ ( unittest.TestCase ):
"""simple docstring"""
def a_ ( self ):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation('gelu')
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))
def a_ ( self ):
snake_case = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
snake_case = get_activation('''gelu''' )
snake_case = get_activation('''gelu_10''' )
snake_case = torch_builtin(__snake_case )
snake_case = geluaa(__snake_case )
snake_case = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(__snake_case ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def a_ ( self ):
get_activation('''gelu''' )
get_activation('''gelu_10''' )
get_activation('''gelu_fast''' )
get_activation('''gelu_new''' )
get_activation('''gelu_python''' )
get_activation('''gelu_pytorch_tanh''' )
get_activation('''linear''' )
get_activation('''mish''' )
get_activation('''quick_gelu''' )
get_activation('''relu''' )
get_activation('''sigmoid''' )
get_activation('''silu''' )
get_activation('''swish''' )
get_activation('''tanh''' )
with self.assertRaises(__snake_case ):
get_activation('''bogus''' )
with self.assertRaises(__snake_case ):
get_activation(__snake_case )
def a_ ( self ):
snake_case = get_activation('''gelu''' )
snake_case = 1
snake_case = get_activation('''gelu''' )
self.assertEqual(acta.a , 1 )
with self.assertRaises(__snake_case ):
snake_case = acta.a
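For reference, the exact GELU that `gelu_python` mirrors is 0.5 * x * (1 + erf(x / sqrt(2))); a quick standalone check against the PyTorch built-in:

import math

import torch


def gelu_exact(x: torch.Tensor) -> torch.Tensor:
    # GELU(x) = 0.5 * x * (1 + erf(x / sqrt(2)))
    return 0.5 * x * (1.0 + torch.erf(x / math.sqrt(2.0)))


x = torch.tensor([-1.0, 0.0, 1.0])
print(torch.allclose(gelu_exact(x), torch.nn.functional.gelu(x)))  # True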
| 127 | 0 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """simple docstring"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, padding=None, truncation=None, top_k=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Supports batched inputs: a dict, a list of dicts, a generator or a dataset
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 141 |
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class _a :
'''simple docstring'''
def __init__( self , A__ , A__=3 , A__=7 , A__=True , A__=True , A__=False , A__=True , A__=99 , A__=32 , A__=5 , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=512 , A__=16 , A__=2 , A__=0.0_2 , A__=3 , A__=4 , A__=None , ):
A__ : List[Any] = parent
A__ : List[str] = batch_size
A__ : Optional[int] = seq_length
A__ : Optional[int] = is_training
A__ : Any = use_input_mask
A__ : Tuple = use_token_type_ids
A__ : str = use_labels
A__ : Tuple = vocab_size
A__ : Any = hidden_size
A__ : List[str] = num_hidden_layers
A__ : Optional[int] = num_attention_heads
A__ : Optional[Any] = intermediate_size
A__ : Optional[Any] = hidden_act
A__ : Tuple = hidden_dropout_prob
A__ : Union[str, Any] = attention_probs_dropout_prob
A__ : List[str] = max_position_embeddings
A__ : Union[str, Any] = type_vocab_size
A__ : str = type_sequence_label_size
A__ : Tuple = initializer_range
A__ : Tuple = num_labels
A__ : Dict = num_choices
A__ : List[str] = scope
def __A ( self ):
A__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Any = None
if self.use_input_mask:
A__ : int = random_attention_mask([self.batch_size, self.seq_length] )
A__ : str = None
A__ : Union[str, Any] = None
A__ : List[str] = None
A__ : Optional[Any] = None
if self.use_labels:
A__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
A__ : Dict = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self ):
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A__ , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=A__ , )
def __A ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ):
A__ : List[str] = FalconModel(config=A__ )
model.to(A__ )
model.eval()
A__ : int = model(A__ , attention_mask=A__ )
A__ : Union[str, Any] = model(A__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ):
A__ : Union[str, Any] = True
A__ : Union[str, Any] = FalconModel(A__ )
model.to(A__ )
model.eval()
A__ : Tuple = model(
A__ , attention_mask=A__ , encoder_hidden_states=A__ , encoder_attention_mask=A__ , )
A__ : Union[str, Any] = model(
A__ , attention_mask=A__ , encoder_hidden_states=A__ , )
A__ : List[str] = model(A__ , attention_mask=A__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ):
A__ : Any = FalconForCausalLM(config=A__ )
model.to(A__ )
model.eval()
A__ : Tuple = model(A__ , attention_mask=A__ , labels=A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ):
A__ : Optional[Any] = True
A__ : Union[str, Any] = True
A__ : int = FalconForCausalLM(config=A__ )
model.to(A__ )
model.eval()
# first forward pass
A__ : List[Any] = model(
A__ , attention_mask=A__ , encoder_hidden_states=A__ , encoder_attention_mask=A__ , use_cache=A__ , )
A__ : Tuple = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
A__ : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ : List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
A__ : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ : List[str] = torch.cat([input_mask, next_mask] , dim=-1 )
A__ : Optional[int] = model(
A__ , attention_mask=A__ , encoder_hidden_states=A__ , encoder_attention_mask=A__ , output_hidden_states=A__ , )["""hidden_states"""][0]
A__ : Any = model(
A__ , attention_mask=A__ , encoder_hidden_states=A__ , encoder_attention_mask=A__ , past_key_values=A__ , output_hidden_states=A__ , )["""hidden_states"""][0]
# select random slice
A__ : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
A__ : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A__ , A__ , atol=1e-3 ) )
def __A ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class _a (__magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: List[Any] = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCAmelCase__: Tuple = (FalconForCausalLM,) if is_torch_available() else ()
UpperCAmelCase__: Optional[int] = (
{
'''feature-extraction''': FalconModel,
'''text-classification''': FalconForSequenceClassification,
'''text-generation''': FalconForCausalLM,
'''question-answering''': FalconForQuestionAnswering,
'''token-classification''': FalconForTokenClassification,
'''zero-shot''': FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__: str = False
UpperCAmelCase__: int = False
def __A ( self ):
A__ : List[Any] = FalconModelTester(self )
A__ : Union[str, Any] = ConfigTester(self , config_class=A__ , hidden_size=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
A__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__ )
def __A ( self ):
A__ , *A__ : List[Any] = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
A__ : Tuple = alibi
self.model_tester.create_and_check_model(A__ , *A__ )
def __A ( self ):
A__ , A__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Optional[int] = 3
A__ : int = input_dict["""input_ids"""]
A__ : int = input_ids.ne(1 ).to(A__ )
A__ : Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
A__ : Optional[int] = FalconForSequenceClassification(A__ )
model.to(A__ )
model.eval()
A__ : int = model(A__ , attention_mask=A__ , labels=A__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __A ( self ):
A__ , A__ : str = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Dict = 3
A__ : Tuple = """single_label_classification"""
A__ : List[Any] = input_dict["""input_ids"""]
A__ : Dict = input_ids.ne(1 ).to(A__ )
A__ : Dict = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
A__ : Any = FalconForSequenceClassification(A__ )
model.to(A__ )
model.eval()
A__ : Any = model(A__ , attention_mask=A__ , labels=A__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __A ( self ):
A__ , A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A__ : List[str] = input_dict["""input_ids"""]
A__ : List[str] = FalconForCausalLM(A__ )
model.to(A__ )
model.eval()
A__ : Any = model(A__ , use_cache=A__ )
A__ : Any = input_ids.shape[0]
A__ : Union[str, Any] = model._convert_to_rw_cache(result.past_key_values )
A__ : int = model._convert_cache_to_standard_format(A__ , A__ )
for layer in range(len(A__ ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def __A ( self ):
A__ , A__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Optional[Any] = 3
A__ : List[Any] = """multi_label_classification"""
A__ : Tuple = input_dict["""input_ids"""]
A__ : List[Any] = input_ids.ne(1 ).to(A__ )
A__ : Optional[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
A__ : Optional[int] = FalconForSequenceClassification(A__ )
model.to(A__ )
model.eval()
A__ : List[Any] = model(A__ , attention_mask=A__ , labels=A__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __A ( self ):
# Falcon can have different numbers of KV-heads than the number of query heads, so we need
# to override this test to use the right head counts.
for model_class in self.all_generative_model_classes:
A__ , A__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(A__ , """use_cache""" ):
return
A__ : Optional[Any] = model_class(A__ ).to(A__ )
if "use_cache" not in inputs:
A__ : Optional[int] = True
A__ : List[Any] = model(**A__ )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
A__ : str = (
getattr(A__ , """decoder_layers""" , A__ )
or getattr(A__ , """num_decoder_layers""" , A__ )
or config.num_hidden_layers
)
A__ : Dict = getattr(A__ , """num_kv_heads""" , config.num_attention_heads )
A__ : List[str] = getattr(A__ , """d_model""" , config.hidden_size )
A__ : Union[str, Any] = embed_dim // num_attention_heads
A__ : str = outputs["""past_key_values"""]
self.assertEqual(len(A__ ) , A__ )
A__ , A__ : int = inputs["""input_ids"""].shape
for i in range(A__ ):
if config.new_decoder_architecture:
A__ : Any = config.num_attention_heads
elif config.multi_query:
A__ : List[Any] = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class _a (unittest.TestCase ):
'''simple docstring'''
@slow
def __A ( self ):
A__ : Dict = AutoTokenizer.from_pretrained("""Rocketknight1/falcon-rw-1b""" )
A__ : List[Any] = FalconForCausalLM.from_pretrained("""Rocketknight1/falcon-rw-1b""" )
model.eval()
model.to(A__ )
A__ : Optional[int] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(A__ )
A__ : Optional[Any] = (
"""My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."""
)
A__ : Any = model.generate(**A__ , do_sample=A__ , max_new_tokens=19 )
A__ : Optional[int] = tokenizer.batch_decode(A__ )[0]
self.assertEqual(A__ , A__ )
@slow
def __A ( self ):
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
A__ : Dict = AutoTokenizer.from_pretrained(A__ )
A__ : List[str] = FalconForCausalLM.from_pretrained(A__ )
model.eval()
model.to(A__ )
A__ : Union[str, Any] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(A__ )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**A__ , do_sample=A__ , max_new_tokens=4 )
model.generate(**A__ , do_sample=A__ , max_new_tokens=4 )
model.generate(**A__ , num_beams=2 , max_new_tokens=4 )
@slow
def __A ( self ):
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
A__ : Dict = AutoTokenizer.from_pretrained(A__ )
A__ : Any = FalconForCausalLM.from_pretrained(A__ )
model.eval()
model.to(device=A__ )
A__ : List[str] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(A__ )
# Test results are the same with and without cache
A__ : Tuple = model.generate(**A__ , do_sample=A__ , max_new_tokens=20 , use_cache=A__ )
A__ : Optional[Any] = model.generate(**A__ , do_sample=A__ , max_new_tokens=20 , use_cache=A__ )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
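The cache assertions above reduce to one shape rule; a toy illustration (all sizes hypothetical): each layer caches a (key, value) pair shaped (batch, num_kv_heads, seq_len, head_dim), and num_kv_heads collapses to 1 under multi-query attention:

import torch

batch, seq_len, head_dim = 2, 5, 64
for num_kv_heads in (8, 1):  # 1 == multi-query attention
    key = torch.zeros(batch, num_kv_heads, seq_len, head_dim)
    value = torch.zeros_like(key)
    layer_cache = (key, value)  # K, V for the decoder = 2 entries
    assert len(layer_cache) == 2 and key.ndim == 4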
| 141 | 1 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt"
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). "
        "Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
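A worked instance of the gradient-accumulation fallback above (the requested batch size of 64 is hypothetical): anything larger than MAX_GPU_BATCH_SIZE is split into that many accumulation steps of 16.

MAX_GPU_BATCH_SIZE = 16
batch_size = 64  # hypothetical requested size
gradient_accumulation_steps = 1
if batch_size > MAX_GPU_BATCH_SIZE:
    gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
    batch_size = MAX_GPU_BATCH_SIZE
print(gradient_accumulation_steps, batch_size)  # 4 16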
| 63 |
'''simple docstring'''
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"

# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to"
            " fix this."
        )
if __name__ == "__main__":
lowerCAmelCase_ : Any = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
lowerCAmelCase_ : Optional[int] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
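To see the sorter's key in action, a tiny standalone run of the same identifier regex over an unsorted mapping body (the entries are arbitrary examples):

import re

_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
blocks = ['        ("zebra", "ZebraModel"),', '        ("albert", "AlbertModel"),']
blocks = sorted(blocks, key=lambda line: _re_identifier.search(line).groups()[0])
print(blocks[0])  # the "albert" entry now sorts first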
| 63 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    """simple docstring"""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
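A rough usage sketch (the checkpoint is a real BLIP repo, but the blank image is a placeholder):

from PIL import Image
from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
image = Image.new("RGB", (384, 384))  # placeholder image
inputs = processor(images=image, text="a photo of", return_tensors="pt")
print(list(inputs.keys()))  # pixel_values plus input_ids / attention_mask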
| 350 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class RobertaConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "roberta"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
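A short sketch of how the two classes combine to declare ONNX export inputs (default task shown):

config = RobertaConfig()
onnx_config = RobertaOnnxConfig(config)
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'})])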
| 241 | 0 |
'''simple docstring'''
from ....utils import logging
logger = logging.get_logger(__name__)


class MMBTConfig:
    """simple docstring"""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        """simple docstring"""
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 272 | '''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
lowerCAmelCase = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(__lowerCAmelCase , max_length=len(__lowerCAmelCase) , padding=__lowerCAmelCase , return_tensors="""pt""")
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase)
self.assertEqual((2, 9) , batch.input_ids.shape)
self.assertEqual((2, 9) , batch.attention_mask.shape)
lowerCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
# Test that special tokens are reset
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors="""pt""")
# check if input_ids are returned and no labels
self.assertIn("""input_ids""" , __lowerCAmelCase)
self.assertIn("""attention_mask""" , __lowerCAmelCase)
self.assertNotIn("""labels""" , __lowerCAmelCase)
self.assertNotIn("""decoder_attention_mask""" , __lowerCAmelCase)
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(text_target=__lowerCAmelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""")
self.assertEqual(32 , targets["""input_ids"""].shape[1])
@require_torch
def a_ ( self):
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(
["""I am a small frog""" * 1024, """I am a small frog"""] , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors="""pt""")
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase)
self.assertEqual(batch.input_ids.shape , (2, 1024))
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ["""A long paragraph for summarization."""]
lowerCAmelCase = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(__lowerCAmelCase , text_target=__lowerCAmelCase , return_tensors="""pt""")
lowerCAmelCase = inputs["""input_ids"""]
lowerCAmelCase = inputs["""labels"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
def a_ ( self):
"""simple docstring"""
pass
def a_ ( self):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = """A, <mask> AllenNLP sentence."""
lowerCAmelCase = tokenizer_r.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase)
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""]) , sum(tokens_p["""token_type_ids"""]))
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""]) / len(tokens_r["""attention_mask"""]) , sum(tokens_p["""attention_mask"""]) / len(tokens_p["""attention_mask"""]) , )
lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""])
lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""])
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
self.assertSequenceEqual(
__lowerCAmelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""])
self.assertSequenceEqual(
__lowerCAmelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""])
| 272 | 1 |
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
UpperCamelCase__ = """\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n"""
UpperCamelCase__ = """\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n"""
UpperCamelCase__ = """\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]

    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
        """simple docstring"""
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None , )
    def _get_feature_types(self):
        """simple docstring"""
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"prediction_text": datasets.Value("string" ),
},
"references": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"answers": datasets.Sequence(datasets.Value("string" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("int64" ),
"paragraph": datasets.Value("int64" ),
"question": datasets.Value("int64" ),
},
"prediction": datasets.Value("int64" ),
},
"references": datasets.Value("int64" ),
}
else:
return {
"predictions": datasets.Value("int64" ),
"references": datasets.Value("int64" ),
}
    def _compute(self, predictions, references):
        """simple docstring"""
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
| 357 |
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError(
            "Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        return (-1, -1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
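A worked example of the mass-action relation n * p = n_i**2 that this function encodes: with hole_conc = 100 and intrinsic_conc = 50, the missing electron concentration is 50**2 / 100 = 25.

print(carrier_concentration(electron_conc=0, hole_conc=100, intrinsic_conc=50))
# ('electron_conc', 25.0)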
| 102 | 0 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """simple docstring"""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output


def random_attention_mask(shape, rng=None):
    """simple docstring"""
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]

        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :Tuple = self._get_input_ids_and_config()
__UpperCamelCase :Union[str, Any] = False
__UpperCamelCase :Union[str, Any] = max_length
__UpperCamelCase :List[Any] = 0
for model_class in self.all_generative_model_classes:
__UpperCamelCase :List[Any] = model_class(snake_case__)
__UpperCamelCase :Optional[Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCamelCase :List[str] = getattr(snake_case__ , snake_case__)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())

    def test_greedy_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)

    def test_sample_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
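The tests above all follow one pattern: run `model.generate` eagerly, then run a `jit`-compiled version and require bit-identical sequences. A minimal standalone sketch of that pattern, assuming a small Flax causal LM checkpoint such as `gpt2` is available (the checkpoint choice and the config-driven generation settings are illustrative):

from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = FlaxAutoModelForCausalLM.from_pretrained("gpt2")

input_ids = tokenizer("Hello world", return_tensors="np").input_ids

# Greedy, config-driven generation, mirroring the mixin above.
model.config.do_sample = False
model.config.max_length = 16

eager_sequences = model.generate(input_ids).sequences

jit_generate = jit(model.generate)  # the first call traces and compiles
jit_sequences = jit_generate(input_ids).sequences

assert eager_sequences.tolist() == jit_sequences.tolist()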
| 43 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list,
        dtype=torch.float32,
        device=protein["aatype"].device,
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
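A short usage sketch for the mapping above. The import paths assume the vendored copy that ships inside transformers (`transformers.models.esm.openfold_utils`); adjust them if this module lives elsewhere:

import torch
from transformers.models.esm.openfold_utils import residue_constants as rc
from transformers.models.esm.openfold_utils.data_transforms import make_atom14_masks

# A 3-residue chain (Ala, Gly, Cys) given as residue-type indices.
protein = {"aatype": torch.tensor([rc.restype_order[r] for r in "AGC"])}
protein = make_atom14_masks(protein)

print(protein["residx_atom14_to_atom37"].shape)  # torch.Size([3, 14])
print(protein["atom37_atom_exists"].shape)       # torch.Size([3, 37])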
| 59 | 0 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
    )
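The converter can also be driven programmatically rather than through argparse; the paths below are placeholders:

# Hypothetical paths -- point these at a real Funnel TF checkpoint.
convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="/path/to/funnel/model.ckpt",
    config_file="/path/to/funnel/config.json",
    pytorch_dump_path="/path/to/funnel/pytorch_model.bin",
    base_model=False,
)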
| 369 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2


def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def __init__( self , A , A , A , A , A , A , A , A=None , A=None , A=None , ) -> List[Any]:
super().__init__()
self.register_modules(
vae=A , text_encoder=A , clip_model=A , tokenizer=A , unet=A , scheduler=A , feature_extractor=A , coca_model=A , coca_tokenizer=A , coca_transform=A , )
snake_case : Optional[int] = (
feature_extractor.size
if isinstance(feature_extractor.size , A )
else feature_extractor.size["""shortest_edge"""]
)
snake_case : Union[str, Any] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , A )
set_requires_grad(self.clip_model , A )
def UpperCAmelCase ( self , A = "auto" ) -> Tuple:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
snake_case : List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A )
def UpperCAmelCase ( self ) -> Optional[int]:
self.enable_attention_slicing(A )
def UpperCAmelCase ( self ) -> Any:
set_requires_grad(self.vae , A )
def UpperCAmelCase ( self ) -> List[Any]:
set_requires_grad(self.vae , A )
def UpperCAmelCase ( self ) -> Union[str, Any]:
set_requires_grad(self.unet , A )
def UpperCAmelCase ( self ) -> Tuple:
set_requires_grad(self.unet , A )
def UpperCAmelCase ( self , A , A , A ) -> Dict:
# get the original timestep using init_timestep
snake_case : Tuple = min(int(num_inference_steps * strength ) , A )
snake_case : List[str] = max(num_inference_steps - init_timestep , 0 )
snake_case : List[str] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def UpperCAmelCase ( self , A , A , A , A , A , A=None ) -> List[str]:
if not isinstance(A , torch.Tensor ):
raise ValueError(f"""`image` has to be of type `torch.Tensor` but is {type(A )}""" )
snake_case : str = image.to(device=A , dtype=A )
if isinstance(A , A ):
snake_case : int = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(A )
]
snake_case : str = torch.cat(A , dim=0 )
else:
snake_case : List[Any] = self.vae.encode(A ).latent_dist.sample(A )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
snake_case : Dict = 0.1_82_15 * init_latents
snake_case : Tuple = init_latents.repeat_interleave(A , dim=0 )
snake_case : Optional[int] = randn_tensor(init_latents.shape , generator=A , device=A , dtype=A )
# get latents
snake_case : Union[str, Any] = self.scheduler.add_noise(A , A , A )
snake_case : List[Any] = init_latents
return latents
def UpperCAmelCase ( self , A ) -> int:
snake_case : Optional[Any] = self.coca_transform(A ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
snake_case : Optional[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
snake_case : int = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("""<end_of_text>""" )[0].replace("""<start_of_text>""" , """""" ).rstrip(""" .,""" )
def UpperCAmelCase ( self , A , A ) -> List[Any]:
snake_case : Tuple = self.feature_extractor.preprocess(A )
snake_case : List[Any] = torch.from_numpy(clip_image_input["""pixel_values"""][0] ).unsqueeze(0 ).to(self.device ).half()
snake_case : Optional[int] = self.clip_model.get_image_features(A )
snake_case : List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=A )
snake_case : Tuple = image_embeddings_clip.repeat_interleave(A , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def UpperCAmelCase ( self , A , A , A , A , A , A , A , ) -> Any:
snake_case : Dict = latents.detach().requires_grad_()
snake_case : str = self.scheduler.scale_model_input(A , A )
# predict the noise residual
snake_case : str = self.unet(A , A , encoder_hidden_states=A ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
snake_case : int = self.scheduler.alphas_cumprod[timestep]
snake_case : Tuple = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
snake_case : Any = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
snake_case : str = torch.sqrt(A )
snake_case : str = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , A ):
snake_case : int = self.scheduler.sigmas[index]
snake_case : List[Any] = latents - sigma * noise_pred
else:
raise ValueError(f"""scheduler type {type(self.scheduler )} not supported""" )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
snake_case : List[str] = 1 / 0.1_82_15 * sample
snake_case : str = self.vae.decode(A ).sample
snake_case : Any = (image / 2 + 0.5).clamp(0 , 1 )
snake_case : str = transforms.Resize(self.feature_extractor_size )(A )
snake_case : Dict = self.normalize(A ).to(latents.dtype )
snake_case : Union[str, Any] = self.clip_model.get_image_features(A )
snake_case : List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=A )
snake_case : Optional[int] = spherical_dist_loss(A , A ).mean() * clip_guidance_scale
snake_case : int = -torch.autograd.grad(A , A )[0]
if isinstance(self.scheduler , A ):
snake_case : Union[str, Any] = latents.detach() + grads * (sigma**2)
snake_case : Union[str, Any] = noise_pred_original
else:
snake_case : List[str] = noise_pred_original - torch.sqrt(A ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , A , A , A = None , A = None , A = 5_1_2 , A = 5_1_2 , A = 0.6 , A = 5_0 , A = 7.5 , A = 1 , A = 0.0 , A = 1_0_0 , A = None , A = "pil" , A = True , A = 0.8 , A = 0.1 , A = 0.1 , ) -> Union[str, Any]:
if isinstance(A , A ) and len(A ) != batch_size:
raise ValueError(f"""You have passed {batch_size} batch_size, but only {len(A )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(A , torch.Generator ) and batch_size > 1:
snake_case : Dict = [generator] + [None] * (batch_size - 1)
snake_case : Tuple = [
("""model""", self.coca_model is None),
("""tokenizer""", self.coca_tokenizer is None),
("""transform""", self.coca_transform is None),
]
snake_case : List[str] = [x[0] for x in coca_is_none if x[1]]
snake_case : Optional[int] = """, """.join(A )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(A ):
raise ValueError(
f"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
f"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
snake_case : Tuple = self.get_image_description(A )
if style_prompt is None:
if len(A ):
raise ValueError(
f"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
f""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
snake_case : List[Any] = self.get_image_description(A )
# get prompt text embeddings for content and style
snake_case : Dict = self.tokenizer(
A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=A , return_tensors="""pt""" , )
snake_case : str = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
snake_case : Dict = self.tokenizer(
A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=A , return_tensors="""pt""" , )
snake_case : Tuple = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
snake_case : List[str] = slerp(A , A , A )
# duplicate text embeddings for each generation per prompt
snake_case : List[Any] = text_embeddings.repeat_interleave(A , dim=0 )
# set timesteps
snake_case : Union[str, Any] = """offset""" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
snake_case : Optional[Any] = {}
if accepts_offset:
snake_case : Dict = 1
self.scheduler.set_timesteps(A , **A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
snake_case , snake_case : List[Any] = self.get_timesteps(A , A , self.device )
snake_case : List[str] = timesteps[:1].repeat(A )
# Preprocess image
snake_case : Dict = preprocess(A , A , A )
snake_case : List[Any] = self.prepare_latents(
A , A , A , text_embeddings.dtype , self.device , A )
snake_case : Optional[int] = preprocess(A , A , A )
snake_case : Optional[Any] = self.prepare_latents(
A , A , A , text_embeddings.dtype , self.device , A )
snake_case : str = slerp(A , A , A )
if clip_guidance_scale > 0:
snake_case : List[Any] = self.get_clip_image_embeddings(A , A )
snake_case : Any = self.get_clip_image_embeddings(A , A )
snake_case : Tuple = slerp(
A , A , A )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
snake_case : Optional[Any] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
snake_case : List[str] = content_text_input.input_ids.shape[-1]
snake_case : Any = self.tokenizer([""""""] , padding="""max_length""" , max_length=A , return_tensors="""pt""" )
snake_case : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
snake_case : Tuple = uncond_embeddings.repeat_interleave(A , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case : str = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
snake_case : Tuple = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
snake_case : List[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
snake_case : List[Any] = torch.randn(A , generator=A , device="""cpu""" , dtype=A ).to(
self.device )
else:
snake_case : Optional[int] = torch.randn(A , generator=A , device=self.device , dtype=A )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
snake_case : Union[str, Any] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
snake_case : Dict = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
snake_case : Tuple = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
snake_case : Dict = {}
if accepts_eta:
snake_case : Union[str, Any] = eta
# check if the scheduler accepts generator
snake_case : List[Any] = """generator""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
snake_case : List[str] = generator
with self.progress_bar(total=A ):
for i, t in enumerate(A ):
# expand the latents if we are doing classifier free guidance
snake_case : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case : List[str] = self.scheduler.scale_model_input(A , A )
# predict the noise residual
snake_case : Any = self.unet(A , A , encoder_hidden_states=A ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
snake_case , snake_case : int = noise_pred.chunk(2 )
snake_case : Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
snake_case : Any = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
snake_case , snake_case : List[Any] = self.cond_fn(
A , A , A , A , A , A , A , )
# compute the previous noisy sample x_t -> x_t-1
snake_case : Tuple = self.scheduler.step(A , A , A , **A ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
snake_case : str = 1 / 0.1_82_15 * latents
snake_case : Optional[Any] = self.vae.decode(A ).sample
snake_case : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
snake_case : str = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
snake_case : Tuple = self.numpy_to_pil(A )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
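The `slerp` helper near the top of this file carries the geometric reasoning of the pipeline: latents and embeddings are blended along the great circle between two vectors rather than along a straight line. A standalone numpy sanity check of the properties it should satisfy (endpoints recovered at t=0 and t=1, unit norm preserved halfway between orthogonal unit vectors):

import numpy as np

def slerp_np(t, v0, v1, DOT_THRESHOLD=0.9995):
    # Same math as the pipeline's slerp, restricted to numpy inputs.
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        return (1 - t) * v0 + t * v1  # nearly parallel: fall back to lerp
    theta_0 = np.arccos(dot)
    s0 = np.sin(theta_0 * (1 - t)) / np.sin(theta_0)
    s1 = np.sin(theta_0 * t) / np.sin(theta_0)
    return s0 * v0 + s1 * v1

v0 = np.array([1.0, 0.0])
v1 = np.array([0.0, 1.0])
assert np.allclose(slerp_np(0.0, v0, v1), v0)
assert np.allclose(slerp_np(1.0, v0, v1), v1)
assert np.isclose(np.linalg.norm(slerp_np(0.5, v0, v1)), 1.0)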
| 176 | 0 |
def nor_gate(input_1: int, input_2: int) -> int:
    """NOR gate: returns 1 only when both inputs are 0."""
    return int(input_1 == input_2 == 0)


def main() -> None:
    """Print the truth table of the NOR gate."""
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
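NOR is functionally complete, so `nor_gate` alone is enough to build the other basic gates; a short illustration:

def not_gate(a: int) -> int:
    return nor_gate(a, a)


def or_gate(a: int, b: int) -> int:
    return not_gate(nor_gate(a, b))


def and_gate(a: int, b: int) -> int:
    return nor_gate(not_gate(a), not_gate(b))


assert [or_gate(a, b) for a in (0, 1) for b in (0, 1)] == [0, 1, 1, 1]
assert [and_gate(a, b) for a in (0, 1) for b in (0, 1)] == [0, 0, 0, 1]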
| 5 |
from math import ceil
def solution(n: int = 1001) -> int:
    """Returns the sum of the numbers on the diagonals of an n by n spiral."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
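The closed form works because the four corners of ring i (i >= 1) are (2i+1)^2, (2i+1)^2 - 2i, (2i+1)^2 - 4i and (2i+1)^2 - 6i, which sum to 4(2i+1)^2 - 12i, exactly the `4 * odd**2 - 6 * even` term above. A brute-force cross-check that sums the corners directly:

def solution_brute(n: int = 1001) -> int:
    total = 1  # centre of the spiral
    for i in range(1, (n + 1) // 2):
        corner = (2 * i + 1) ** 2  # top-right corner of ring i
        total += 4 * corner - (2 * i + 4 * i + 6 * i)
    return total


assert solution_brute(5) == 101  # the worked example from Project Euler 28
assert solution_brute(1001) == solution(1001)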
| 274 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}


class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
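A brief usage sketch; the values are illustrative, and the two-stage invariant enforced in `__init__` is visible at the end:

config = DeformableDetrConfig(num_queries=100, two_stage=True, with_box_refine=True)
print(config.num_attention_heads)  # 8, aliased to encoder_attention_heads
print(config.hidden_size)          # 256, aliased to d_model

try:
    DeformableDetrConfig(two_stage=True, with_box_refine=False)
except ValueError as err:
    print(err)  # "If two_stage is True, with_box_refine must be True."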
| 46 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
"""IBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""IBertForMaskedLM""",
"""IBertForMultipleChoice""",
"""IBertForQuestionAnswering""",
"""IBertForSequenceClassification""",
"""IBertForTokenClassification""",
"""IBertModel""",
"""IBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 46 | 1 |
def perfect_cube(n: int) -> bool:
    """Check whether n is a perfect cube via a floating-point cube root."""
    val = n ** (1 / 3)
    return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
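The float-based check above is fragile: `n ** (1 / 3)` is computed in floating point, so `(val * val * val) == n` can fail for large perfect cubes. A rounding-hardened variant (a sketch, not a drop-in replacement for the original):

def perfect_cube_round(n: int) -> bool:
    # Round the float cube root to the nearest integer, then verify exactly.
    n = abs(n)  # a negative integer is a cube iff its absolute value is
    root = round(n ** (1 / 3))
    return any((root + d) ** 3 == n for d in (-1, 0, 1))  # guard off-by-one


assert perfect_cube_round(27)
assert perfect_cube_round(12345 ** 3)
assert not perfect_cube_round(4)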
| 32 |
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class SCREAMING_SNAKE_CASE__ ( lowercase__ , unittest.TestCase ):
# TODO: is there an appropriate internal test set?
snake_case__ : Any = '''ssube/stable-diffusion-x4-upscaler-onnx'''
def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : int=0 ) -> Tuple:
a_ : Union[str, Any] = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) )
a_ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
a_ : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = self.get_dummy_inputs()
a_ : int = pipe(**SCREAMING_SNAKE_CASE__ ).images
a_ : Tuple = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 5_1_2, 5_1_2, 3)
a_ : List[Any] = np.array(
[0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
a_ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
a_ : int = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
a_ : List[str] = self.get_dummy_inputs()
a_ : List[str] = pipe(**SCREAMING_SNAKE_CASE__ ).images
a_ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
a_ : str = np.array(
[0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
a_ : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
a_ : Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = self.get_dummy_inputs()
a_ : Dict = pipe(**SCREAMING_SNAKE_CASE__ ).images
a_ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
a_ : Optional[Any] = np.array(
[0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
a_ : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
a_ : int = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
a_ : Optional[Any] = self.get_dummy_inputs()
a_ : Dict = pipe(**SCREAMING_SNAKE_CASE__ ).images
a_ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
a_ : int = np.array(
[0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
a_ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
a_ : Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = self.get_dummy_inputs()
a_ : List[str] = pipe(**SCREAMING_SNAKE_CASE__ ).images
a_ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
a_ : Union[str, Any] = np.array(
[0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
a_ : List[str] = ort.SessionOptions()
a_ : int = False
return options
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
a_ : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
a_ : int = init_image.resize((1_2_8, 1_2_8) )
# using the PNDM scheduler by default
a_ : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
a_ : Tuple = 'A fantasy landscape, trending on artstation'
a_ : str = torch.manual_seed(0 )
a_ : List[str] = pipe(
prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , num_inference_steps=1_0 , generator=SCREAMING_SNAKE_CASE__ , output_type='np' , )
a_ : Dict = output.images
a_ : Any = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
a_ : str = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
a_ : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
a_ : List[str] = init_image.resize((1_2_8, 1_2_8) )
a_ : Dict = LMSDiscreteScheduler.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' , subfolder='scheduler' )
a_ : Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' , scheduler=SCREAMING_SNAKE_CASE__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
a_ : Any = 'A fantasy landscape, trending on artstation'
a_ : Tuple = torch.manual_seed(0 )
a_ : Optional[Any] = pipe(
prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , num_inference_steps=2_0 , generator=SCREAMING_SNAKE_CASE__ , output_type='np' , )
a_ : str = output.images
a_ : List[Any] = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
a_ : Tuple = np.array(
[0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 32 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : List[Any] = logging.get_logger(__name__)
lowercase : Any = {
"facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ):
"""simple docstring"""
lowercase : int = 'nllb-moe'
lowercase : Optional[int] = ['past_key_values']
lowercase : str = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , __UpperCamelCase=12_81_12 , __UpperCamelCase=10_24 , __UpperCamelCase=12 , __UpperCamelCase=40_96 , __UpperCamelCase=16 , __UpperCamelCase=12 , __UpperCamelCase=40_96 , __UpperCamelCase=16 , __UpperCamelCase=0.05 , __UpperCamelCase=0.05 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase="relu" , __UpperCamelCase=10_24 , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0 , __UpperCamelCase=0.02 , __UpperCamelCase=2 , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase="float32" , __UpperCamelCase=False , __UpperCamelCase=1_28 , __UpperCamelCase=64 , __UpperCamelCase=4 , __UpperCamelCase=4 , __UpperCamelCase=0.001 , __UpperCamelCase=0.001 , __UpperCamelCase="all" , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=1.0 , __UpperCamelCase=0.2 , __UpperCamelCase=1 , __UpperCamelCase=0 , __UpperCamelCase=2 , __UpperCamelCase=False , **__UpperCamelCase , ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase : Optional[Any] = vocab_size
__UpperCamelCase : Dict = max_position_embeddings
__UpperCamelCase : str = d_model
__UpperCamelCase : List[str] = encoder_ffn_dim
__UpperCamelCase : Optional[Any] = encoder_layers
__UpperCamelCase : List[Any] = encoder_attention_heads
__UpperCamelCase : int = decoder_ffn_dim
__UpperCamelCase : Union[str, Any] = decoder_layers
__UpperCamelCase : Union[str, Any] = decoder_attention_heads
__UpperCamelCase : str = dropout
__UpperCamelCase : int = attention_dropout
__UpperCamelCase : List[str] = activation_dropout
__UpperCamelCase : List[str] = activation_function
__UpperCamelCase : Union[str, Any] = init_std
__UpperCamelCase : Any = encoder_layerdrop
__UpperCamelCase : int = decoder_layerdrop
__UpperCamelCase : Dict = use_cache
__UpperCamelCase : Union[str, Any] = encoder_layers
__UpperCamelCase : int = scale_embedding # scale factor will be sqrt(d_model) if True
__UpperCamelCase : Any = router_z_loss_coef
__UpperCamelCase : str = router_aux_loss_coef
__UpperCamelCase : int = decoder_sparse_step
__UpperCamelCase : str = encoder_sparse_step
__UpperCamelCase : List[str] = num_experts
__UpperCamelCase : Dict = expert_capacity
__UpperCamelCase : Tuple = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
__UpperCamelCase : str = router_dtype
__UpperCamelCase : Dict = router_ignore_padding_tokens
__UpperCamelCase : Dict = batch_prioritized_routing
__UpperCamelCase : int = second_expert_policy
__UpperCamelCase : Tuple = normalize_router_prob_before_dropping
__UpperCamelCase : int = moe_eval_capacity_token_fraction
__UpperCamelCase : List[str] = moe_token_dropout
__UpperCamelCase : int = output_router_logits
super().__init__(
pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , is_encoder_decoder=__UpperCamelCase , decoder_start_token_id=__UpperCamelCase , **__UpperCamelCase , ) | 171 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,) | 171 | 1 |
"""simple docstring"""
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
A: str = logging.getLogger(__name__)
def _snake_case ( ):
UpperCAmelCase : List[str] = argparse.ArgumentParser(
description="""Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.""" )
parser.add_argument(
"""--dataset_name""" , type=UpperCamelCase , default="""wikitext""" , help="""Name of the training. Explore datasets at: hf.co/datasets.""" , )
parser.add_argument(
"""--dataset_config""" , type=UpperCamelCase , default="""wikitext-103-raw-v1""" , help="""Configuration name of the dataset.""" )
parser.add_argument(
"""--tokenizer_name_or_path""" , type=UpperCamelCase , default="""sayakpaul/unigram-tokenizer-wikitext""" , help="""Tokenizer identifier. Can be a local filepath or a Hub identifier.""" , )
parser.add_argument(
"""--shard_size""" , type=UpperCamelCase , default=1000 , help="""Number of entries to go in a single shard.""" , )
parser.add_argument("""--split""" , type=UpperCamelCase , default="""train""" , choices=["""train""", """test""", """validation"""] )
parser.add_argument(
"""--limit""" , default=UpperCamelCase , type=UpperCamelCase , help="""Limit the number of shards (used for debugging).""" , )
parser.add_argument(
"""--max_length""" , type=UpperCamelCase , default=512 , help="""Maximum sequence length. For training on TPUs, it helps to have a maximum"""
""" sequence length that is a multiple of 8.""" , )
parser.add_argument(
"""--output_dir""" , default="""tf-tpu""" , type=UpperCamelCase , help="""Output directory where the TFRecord shards will be saved. If the"""
""" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"""
""" shards will be directly saved to a Google Cloud Storage bucket.""" , )
UpperCAmelCase : Dict = parser.parse_args()
return args
def _snake_case ( UpperCamelCase : str ):
def fn(UpperCamelCase : Optional[int] ):
return tokenizer(examples["""text"""] )
return fn
def _snake_case ( UpperCamelCase : Optional[int] ):
UpperCAmelCase : List[Any] = []
for i in range(len(tokenized_data["""input_ids"""] ) ):
UpperCAmelCase : List[str] = {
"""input_ids""": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["""input_ids"""][i] ) ),
"""attention_mask""": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["""attention_mask"""][i] ) ),
}
UpperCAmelCase : Union[str, Any] = tf.train.Features(feature=UpperCamelCase )
UpperCAmelCase : int = tf.train.Example(features=UpperCamelCase )
UpperCAmelCase : Tuple = example.SerializeToString()
records.append(UpperCamelCase )
return records
def _snake_case ( UpperCamelCase : List[Any] ):
UpperCAmelCase : Any = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
UpperCAmelCase : List[Any] = min(len(UpperCamelCase ) , args.limit )
UpperCAmelCase : Any = dataset.select(range(UpperCamelCase ) )
print(F"Limiting the dataset to {args.limit} entries." )
UpperCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
UpperCAmelCase : Optional[Any] = os.path.join(args.output_dir , args.split )
if not os.path.exists(UpperCamelCase ):
os.makedirs(UpperCamelCase )
else:
UpperCAmelCase : Tuple = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
UpperCAmelCase : str = tokenize_function(UpperCamelCase )
UpperCAmelCase : str = dataset.map(UpperCamelCase , batched=UpperCamelCase , num_proc=4 , remove_columns=["""text"""] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(UpperCamelCase : Any ):
# Concatenate all texts.
UpperCAmelCase : List[str] = {k: sum(examples[k] , [] ) for k in examples.keys()}
UpperCAmelCase : str = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
UpperCAmelCase : Dict = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
UpperCAmelCase : Any = {
k: [t[i : i + args.max_length] for i in range(0 , UpperCamelCase , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
UpperCAmelCase : Any = dataset_tokenized.map(UpperCamelCase , batched=UpperCamelCase , batch_size=1000 , num_proc=4 )
UpperCAmelCase : int = 0
UpperCAmelCase : Optional[Any] = 0
for shard in range(0 , len(UpperCamelCase ) , args.shard_size ):
UpperCAmelCase : Optional[Any] = grouped_dataset[shard : shard + args.shard_size]
UpperCAmelCase : Dict = len(dataset_snapshot["""input_ids"""] )
UpperCAmelCase : Dict = os.path.join(UpperCamelCase , F"dataset-{shard_count}-{records_containing}.tfrecord" )
UpperCAmelCase : Union[str, Any] = get_serialized_examples(UpperCamelCase )
with tf.io.TFRecordWriter(UpperCamelCase ) as out_file:
for i in range(len(UpperCamelCase ) ):
UpperCAmelCase : List[Any] = serialized_examples[i]
out_file.write(UpperCamelCase )
print("""Wrote file {} containing {} records""".format(UpperCamelCase , UpperCamelCase ) )
shard_count += 1
total_records += records_containing
with open(F"split-{args.split}-records-count.txt" , """w""" ) as f:
print(F"Total {args.split} records: {total_records}" , file=UpperCamelCase )
if __name__ == "__main__":
A: Union[str, Any] = parse_args()
main(args)
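A sketch of reading the shards back with tf.data. The feature spec mirrors the writer above; it assumes the default `--max_length 512` and `--output_dir tf-tpu` with the `train` split:

import tensorflow as tf

feature_spec = {
    "input_ids": tf.io.FixedLenFeature([512], tf.int64),
    "attention_mask": tf.io.FixedLenFeature([512], tf.int64),
}


def decode_fn(example_proto):
    return tf.io.parse_single_example(example_proto, feature_spec)


dataset = (
    tf.data.TFRecordDataset(tf.io.gfile.glob("tf-tpu/train/dataset-*.tfrecord"))
    .map(decode_fn, num_parallel_calls=tf.data.AUTOTUNE)
    .batch(8)
)
for batch in dataset.take(1):
    print(batch["input_ids"].shape)  # (8, 512)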
| 109 |
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
import os
import warnings
from typing import List, Optional

from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig

logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        # input texts are encoded with the question-encoder tokenizer by default
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        """Save both sub-tokenizers under dedicated subfolders of `save_directory`."""
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load the question-encoder and generator tokenizers from a pretrained RAG checkpoint."""
        # dynamically import AutoTokenizer to avoid a circular import
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: Optional[str] = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
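# Usage sketch (illustrative, not part of the original module): from user code this
# class is exposed as `transformers.RagTokenizer`. "facebook/rag-token-nq" is the
# published RAG-Token checkpoint; any checkpoint that ships both tokenizer
# subfolders would work the same way, assuming hub access and torch installed.
#
#     from transformers import RagTokenizer
#
#     tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#     batch = tokenizer(["who holds the record in 100m freestyle?"], return_tensors="pt")
#     print(batch["input_ids"].shape)  # encoded with the question-encoder tokenizer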
| 242 | 0 |
"""
A simple launcher script for TPU training: spawns one process per TPU core and
forwards the remaining command-line arguments to the wrapped training script.
"""

import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    """Parse the launcher's own options plus the wrapped script and its arguments."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv so the wrapped script sees its own arguments plus the core count.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
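# Usage sketch (illustrative, not part of the launcher): the wrapped script must
# define `_mp_fn`, since that is the symbol `xmp.spawn` resolves above; it is
# invoked once per process with the local ordinal. `my_training_script.py` and
# its contents below are hypothetical.
#
#     # my_training_script.py
#     def _mp_fn(index):
#         print(f"TPU process {index} starting")
#
# Launched with:
#
#     python xla_spawn.py --num_cores 8 my_training_script.py --any --script --args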
| 355 |
"""
Stock-price forecasting with a stacked LSTM: scale the target column to [0, 1],
slice it into sliding windows of `look_back` past steps mapped to `forward_days`
future steps, then train and evaluate a small Keras model.
"""

import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset, set the target column here
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    scaler = MinMaxScaler()
    actual_data = scaler.fit_transform(actual_data)
    look_back = 10  # number of past steps fed to the model
    forward_days = 5  # number of future steps to predict
    periods = 20  # number of look_back-sized blocks reserved for testing
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    # input_shape on non-first layers is ignored by Keras, so it is omitted here
    model.add(LSTM(64))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
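    # Follow-up sketch (not in the original script): predictions are still in the
    # scaler's [0, 1] range; the fitted `scaler` defined above maps them back to
    # price units. Flattening to one column first is required because the scaler
    # was fitted on a single feature.
    pred_prices = scaler.inverse_transform(pred.reshape(-1, 1)).reshape(pred.shape)
    print(pred_prices[:2])  # first two forward_days-step forecasts in price units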
| 129 | 0 |