from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class Blip2Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # All tokenizer options are forwarded unchanged to both call sites below.
        tokenizer_kwargs = dict(
            add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
            max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
            return_token_type_ids=return_token_type_ids, return_length=return_length,
            verbose=verbose, return_tensors=return_tensors, **kwargs,
        )

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, **tokenizer_kwargs)
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(text=text, **tokenizer_kwargs)
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
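

# --- Usage sketch (added; hedged): how a processor like the one above is
# typically driven once exported from the library. The `Blip2Processor` export
# and the checkpoint name are assumptions inferred from the
# BlipImageProcessor/AutoTokenizer pairing, not confirmed by this file alone.
#
#   from PIL import Image
#   from transformers import Blip2Processor
#
#   processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#   image = Image.open("cat.png")
#   inputs = processor(images=image, text="a photo of", return_tensors="pt")
#   # `inputs` holds pixel_values plus input_ids/attention_mask merged in by update()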
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST


class FlaubertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )

    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )

        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)

        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
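

# --- Usage sketch (added, outside the test harness): a minimal, hedged example
# exercising FlaubertModel with the same tiny config values FlaubertModelTester
# uses above. The model is randomly initialized, so only the output shape is
# meaningful.
if is_torch_available():

    def _demo_tiny_flaubert():
        config = FlaubertConfig(vocab_size=99, emb_dim=32, n_layers=5, n_heads=4)
        model = FlaubertModel(config)
        model.eval()
        input_ids = ids_tensor([2, 7], config.vocab_size)  # batch of 2, sequence length 7
        with torch.no_grad():
            last_hidden_state = model(input_ids)[0]
        assert last_hidden_state.shape == (2, 7, config.emb_dim)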
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_whisper"] = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_whisper"] = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_whisper"] = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
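
# --- Usage note (added; hedged): with the lazy structure above, nothing under
# transformers.models.whisper is imported until one of the exported names is
# actually touched. For example (assuming torch is installed):
#
#   from transformers import WhisperConfig, WhisperModel
#
#   config = WhisperConfig()      # resolves through the _LazyModule machinery
#   model = WhisperModel(config)  # first touch imports modeling_whisper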
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = """\
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""

_DESCRIPTION = """\
Mean Squared Error (MSE) is the average of the squared differences between the predicted
and actual values.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.
    references: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    sample_weight: array-like of shape (n_samples,), default=None
        Sample weights.
    multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
        Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
        "raw_values" : Returns a full set of errors in case of multioutput input.
        "uniform_average" : Errors of all outputs are averaged with uniform weight.
    squared : bool, default=True
        If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.

Returns:
    mse : mean squared error.

Examples:

    >>> mse_metric = datasets.load_metric("mse")
    >>> predictions = [2.5, 0.0, 2, 8]
    >>> references = [3, -0.5, 2, 7]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.375}
    >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
    >>> print(rmse_result)
    {'mse': 0.6123724356957945}

    If you're using multi-dimensional lists, then set the config as follows:

    >>> mse_metric = datasets.load_metric("mse", "multilist")
    >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
    >>> references = [[0, 2], [-1, 2], [8, -5]]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.7083333333333334}
    >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
    >>> print(results) # doctest: +NORMALIZE_WHITESPACE
    {'mse': array([0.41666667, 1. ])}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """
    Return the citation count string for a publication, scraped from a Google
    Scholar lookup page.
    """
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
from __future__ import annotations
from typing import Any


class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        """
        Arguments:
            num_of_nodes - the number of nodes in the graph
        Attributes:
            m_num_of_nodes - the number of nodes in the graph.
            m_edges - the list of edges.
            m_component - a dictionary mapping each node to the component it
            belongs to.
        """
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Adds an edge in the format [first, second, edge weight] to the graph."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Returns the root of the component that u_node belongs to."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Re-labels every node with the root of its component."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Merges the components of u_node and v_node, attaching the smaller
        component to the larger one."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Performs Borůvka's algorithm to find the minimum spanning tree."""
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            # For every component, remember its cheapest outgoing edge.
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            # Add each component's cheapest edge to the MST and merge components.
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


def test_vector() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
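

# --- Usage sketch (added, not part of the original module): a small worked
# example. Borůvka repeatedly adds each component's cheapest outgoing edge;
# for this graph the MST is {2-3 (4), 0-3 (5), 0-1 (10)}, total weight 19.
def _demo_boruvka() -> None:
    g = Graph(4)
    g.add_edge(0, 1, 10)
    g.add_edge(0, 2, 6)
    g.add_edge(0, 3, 5)
    g.add_edge(1, 3, 15)
    g.add_edge(2, 3, 4)
    g.boruvka()  # prints each added edge, then the total MST weight (19)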
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor


class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }


@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
"""simple docstring"""
def lowercase ( _snake_case : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
__snake_case : Tuple = len(_snake_case )
__snake_case : str = sum(_snake_case )
__snake_case : Dict = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
__snake_case : Optional[Any] = True
for i in range(1 , s + 1 ):
__snake_case : int = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
__snake_case : Union[str, Any] = dp[i][j - 1]
if arr[i - 1] <= j:
__snake_case : Tuple = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
__snake_case : List[str] = s - 2 * j
break
return diff
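

# --- Usage sketch (added): for [1, 6, 11, 5] (total 23) the best split is
# {1, 5, 6} vs {11}, i.e. 12 vs 11, so the minimum difference is 1.
if __name__ == "__main__":
    print(find_min([1, 6, 11, 5]))  # 1
    print(find_min([1, 1, 4]))      # 2: best split is {1, 1} vs {4}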
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        super().__init__(**kwargs)
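

# --- Usage sketch (added; hedged): the config follows the usual
# PretrainedConfig pattern, with defaults matching the
# google/vivit-b-16x2-kinetics400 architecture. Pairing it with a VivitModel is
# an assumption, since this file only defines the config:
#
#   from transformers import VivitConfig, VivitModel
#
#   configuration = VivitConfig()       # ViViT-B: 32 frames, 2x16x16 tubelets
#   model = VivitModel(configuration)   # randomly initialized (requires torch)
#   assert configuration.num_frames == 32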
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image


class MaskFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))


@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)


TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )

    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
__lowercase: List[str] = (
"https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)
__lowercase: List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
def SCREAMING_SNAKE_CASE__( ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ = "https://pypi.org/pypi/diffusers/json"
UpperCamelCase__ = json.loads(request.urlopen(_UpperCamelCase ).read() )["releases"].keys()
return sorted(_UpperCamelCase , key=lambda _UpperCamelCase : version.Version(_UpperCamelCase ) )
def init_hf_modules():
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name: Union[str, os.PathLike]):
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
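# A tiny illustration of what the two regexes in `get_relative_imports` capture; the
# sample source text is made up for the demo and reuses the module's `re` import.
def _demo_relative_imports():
    sample = "import .utils\nfrom .pipeline_helpers import load\nimport torch"
    found = re.findall(r"^\s*import\s+\.(\S+)\s*$", sample, flags=re.MULTILINE)
    found += re.findall(r"^\s*from\s+\.(\S+)\s+import", sample, flags=re.MULTILINE)
    # `torch` is an absolute import, so only the two relative modules are reported.
    assert sorted(set(found)) == ["pipeline_helpers", "utils"]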
def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
def check_imports(filename):
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    # Import a module from the cache directory for dynamic modules and extract a class from it.
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    # Retrieve the pipeline class that inherits from `DiffusionPipeline`; exactly one such class must be defined.
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    # Download and cache `module_file` from the repo `pretrained_model_name_or_path`, or grab it if it's a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve the GitHub version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative imports
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    # And lastly we get the class inside our newly created module
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
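# A hedged usage sketch for the loader above: fetching a community pipeline class by
# name. "clip_guided_stable_diffusion" is an illustrative pipeline name resolved via
# COMMUNITY_PIPELINES_URL; availability depends on the pinned revision.
def _demo_load_community_pipeline():
    return get_class_from_dynamic_module(
        "clip_guided_stable_diffusion",      # repo-less name -> GitHub community pipelines branch
        "clip_guided_stable_diffusion.py",   # cached locally under DIFFUSERS_DYNAMIC_MODULE_NAME
    )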
'''simple docstring'''
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    """
    Determine the missing carrier concentration in a semiconductor from the mass
    action law, n * p = n_i**2. Exactly one of the three arguments must be 0; that
    is the quantity being solved for.

    >>> carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0)
    ('intrinsic_conc', 50.0)
    >>> carrier_concentration(electron_conc=0, hole_conc=1600, intrinsic_conc=200)
    ('electron_conc', 25.0)
    """
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or fewer than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError("Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
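# One more worked instance of the mass action law the function implements: with
# n = 50 and n_i = 100, the hole concentration is n_i**2 / n = 200.
def _demo_carrier_concentration():
    assert carrier_concentration(electron_conc=50, hole_conc=0, intrinsic_conc=100) == ("hole_conc", 200.0)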
'''simple docstring'''
from . import (
    albert,
    align,
    altclip,
    audio_spectrogram_transformer,
    auto,
    autoformer,
    bark,
    bart,
    barthez,
    bartpho,
    beit,
    bert,
    bert_generation,
    bert_japanese,
    bertweet,
    big_bird,
    bigbird_pegasus,
    biogpt,
    bit,
    blenderbot,
    blenderbot_small,
    blip,
    blip_2,
    bloom,
    bridgetower,
    byt5,
    camembert,
    canine,
    chinese_clip,
    clap,
    clip,
    clipseg,
    codegen,
    conditional_detr,
    convbert,
    convnext,
    convnextv2,
    cpm,
    cpmant,
    ctrl,
    cvt,
    data2vec,
    deberta,
    deberta_v2,
    decision_transformer,
    deformable_detr,
    deit,
    deprecated,
    deta,
    detr,
    dialogpt,
    dinat,
    distilbert,
    dit,
    donut,
    dpr,
    dpt,
    efficientformer,
    efficientnet,
    electra,
    encodec,
    encoder_decoder,
    ernie,
    ernie_m,
    esm,
    falcon,
    flaubert,
    flava,
    fnet,
    focalnet,
    fsmt,
    funnel,
    git,
    glpn,
    gpt2,
    gpt_bigcode,
    gpt_neo,
    gpt_neox,
    gpt_neox_japanese,
    gpt_sw3,
    gptj,
    gptsan_japanese,
    graphormer,
    groupvit,
    herbert,
    hubert,
    ibert,
    imagegpt,
    informer,
    instructblip,
    jukebox,
    layoutlm,
    layoutlmv2,
    layoutlmv3,
    layoutxlm,
    led,
    levit,
    lilt,
    llama,
    longformer,
    longt5,
    luke,
    lxmert,
    m2m_100,
    marian,
    markuplm,
    mask2former,
    maskformer,
    mbart,
    mbart50,
    mega,
    megatron_bert,
    megatron_gpt2,
    mgp_str,
    mluke,
    mobilebert,
    mobilenet_v1,
    mobilenet_v2,
    mobilevit,
    mobilevitv2,
    mpnet,
    mra,
    mt5,
    musicgen,
    mvp,
    nat,
    nezha,
    nllb,
    nllb_moe,
    nystromformer,
    oneformer,
    open_llama,
    openai,
    opt,
    owlvit,
    pegasus,
    pegasus_x,
    perceiver,
    phobert,
    pix2struct,
    plbart,
    poolformer,
    prophetnet,
    qdqbert,
    rag,
    realm,
    reformer,
    regnet,
    rembert,
    resnet,
    roberta,
    roberta_prelayernorm,
    roc_bert,
    roformer,
    rwkv,
    sam,
    segformer,
    sew,
    sew_d,
    speech_encoder_decoder,
    speech_to_text,
    speech_to_text_2,
    speecht5,
    splinter,
    squeezebert,
    swiftformer,
    swin,
    swin2sr,
    swinv2,
    switch_transformers,
    t5,
    table_transformer,
    tapas,
    time_series_transformer,
    timesformer,
    timm_backbone,
    transfo_xl,
    trocr,
    tvlt,
    umt5,
    unispeech,
    unispeech_sat,
    upernet,
    videomae,
    vilt,
    vision_encoder_decoder,
    vision_text_dual_encoder,
    visual_bert,
    vit,
    vit_hybrid,
    vit_mae,
    vit_msn,
    vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
    wavlm,
    whisper,
    x_clip,
    xglm,
    xlm,
    xlm_prophetnet,
    xlm_roberta,
    xlm_roberta_xl,
    xlnet,
    xmod,
    yolos,
    yoso,
)
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
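# A minimal sketch of a concrete reader built on the abstract base above; this
# in-memory variant is an illustration, not one of the readers the library ships.
class InMemoryListReader(AbstractDatasetReader):
    def read(self):
        # `path_or_paths` is repurposed here to carry a plain list of example dicts.
        return Dataset.from_list(self.path_or_paths)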
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The generated text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass decode kwargs (here `skip_special_tokens`) through the streamer; must be tested
        # with an actual checkpoint because the dummy models' tokenizers are not aligned with their models.
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
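# A hedged sketch of the pattern the iterator-streamer tests above exercise: generation
# runs on a background thread while the caller consumes decoded chunks as they arrive.
def _demo_stream_generation(model, tokenizer, prompt):
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
    thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 20, "streamer": streamer})
    thread.start()
    text = ""
    for chunk in streamer:
        text += chunk
    thread.join()
    return text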
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)

        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
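# A small sketch of initializing and applying the resnet block above; shapes are NHWC
# and the channel sizes are arbitrary illustration values.
def _demo_resnet_block():
    block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
    rng = jax.random.PRNGKey(0)
    hidden_states = jnp.zeros((1, 16, 16, 32))
    temb = jnp.zeros((1, 128))
    params = block.init(rng, hidden_states, temb)
    return block.apply(params, hidden_states, temb)  # shape (1, 16, 16, 64)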
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
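# A hedged usage sketch for the deduplication entry point above; the toy rows only need
# the "content", "repo_name" and "path" columns the helpers read (snippets this short
# fall under MIN_NUM_TOKENS, so no clusters are expected).
def _demo_deduplicate():
    toy = Dataset.from_list(
        [{"content": f"def f(): return {i}", "repo_name": "demo", "path": f"f{i}.py"} for i in range(4)]
    )
    ds_filter, duplicate_clusters = deduplicate_dataset(toy, jaccard_threshold=0.85)
    return ds_filter, duplicate_clusters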
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
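# A brief sketch of constructing the config above; "resnet50" is an illustrative timm
# model name.
def _demo_timm_backbone_config():
    config = TimmBackboneConfig(backbone="resnet50", out_indices=(2, 3, 4))
    assert config.use_timm_backbone and config.out_indices == (2, 3, 4)
    return config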
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc


def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
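# A tiny illustration of `clean_doc_toc` on a made-up section: the duplicate entry
# collapses, titles are sorted, and "Overview" is pinned to the front.
def _demo_clean_doc_toc():
    toy = [
        {"local": "api/b", "title": "Beta"},
        {"local": "api/overview", "title": "Overview"},
        {"local": "api/a", "title": "Alpha"},
        {"local": "api/b", "title": "Beta"},
    ]
    cleaned = clean_doc_toc(toy)
    assert [doc["title"] for doc in cleaned] == ["Overview", "Alpha", "Beta"]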
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
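# A hedged sketch of what the `_LazyModule` indirection above buys: importing the
# package is cheap, and the torch-backed symbols are only materialized on first
# attribute access.
def _demo_lazy_import():
    import importlib

    swiftformer = importlib.import_module("transformers.models.swiftformer")
    return swiftformer.SwiftFormerConfig()  # first attribute access triggers the real submodule import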
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    HfArgumentParser,
    MBartTokenizer,
    MBartTokenizerFast,
    set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
    assert_all_frozen,
    build_compute_metrics_fn,
    check_output_dir,
    freeze_embeds,
    freeze_params,
    lmap,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    freeze_encoder: bool = field(default=False, metadata={'help': 'Whether to freeze the encoder.'} )
    freeze_embeds: bool = field(default=False, metadata={'help': 'Whether to freeze the embeddings.'} )


@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
    task: Optional[str] = field(
        default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
    max_source_length: Optional[int] = field(
        default=1_024 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    max_target_length: Optional[int] = field(
        default=128 , metadata={
            'help': (
                'The maximum total sequence length for target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    val_max_target_length: Optional[int] = field(
        default=142 , metadata={
            'help': (
                'The maximum total sequence length for validation target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded. '
                'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
                'during ``evaluate`` and ``predict``.'
            )
        } , )
    test_max_target_length: Optional[int] = field(
        default=142 , metadata={
            'help': (
                'The maximum total sequence length for test target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    n_train: Optional[int] = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
    n_val: Optional[int] = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
    n_test: Optional[int] = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
    src_lang: Optional[str] = field(default=None , metadata={'help': 'Source language id for translation.'} )
    tgt_lang: Optional[str] = field(default=None , metadata={'help': 'Target language id for translation.'} )
    eval_beams: Optional[int] = field(default=None , metadata={'help': '# num_beams to use for evaluation.'} )
    ignore_pad_token_for_loss: bool = field(
        default=True , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )


def handle_metrics(split, metrics, output_dir):
    logger.info(f"""***** {split} metrics *****""" )
    for key in sorted(metrics.keys() ):
        logger.info(f"""  {key} = {metrics[key]}""" )
    save_json(metrics , os.path.join(output_dir , f"""{split}_results.json""" ) )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments) )

    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args )

    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s -   %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fp16 , )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
    logger.info('''Training/evaluation parameters %s''' , training_args )

    # Set seed
    set_seed(training_args.seed )

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )

    extra_model_params = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
    for p in extra_model_params:
        if getattr(training_args , p , None ):
            assert hasattr(config , p ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
            setattr(config , p , getattr(training_args , p ) )

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=config , cache_dir=model_args.cache_dir , )

    # use task specific params
    use_task_specific_params(model , data_args.task )

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer , (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer , MBartTokenizer ):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )

    if model_args.freeze_embeds:
        freeze_embeds(model )
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder() )
        assert_all_frozen(model.get_encoder() )

    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task , tokenizer ) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model , args=training_args , data_args=data_args , train_dataset=train_dataset , eval_dataset=eval_dataset , data_collator=Seq2SeqDataCollator(
            tokenizer , data_args , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=compute_metrics_fn , tokenizer=tokenizer , )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info('''*** Train ***''' )
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        metrics = train_result.metrics
        metrics['''train_n_objs'''] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics('''train''' , metrics , training_args.output_dir )
            all_metrics.update(metrics )

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir )

    # Evaluation
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        metrics = trainer.evaluate(metric_key_prefix='''val''' )
        metrics['''val_n_objs'''] = data_args.n_val
        metrics['''val_loss'''] = round(metrics['''val_loss'''] , 4 )

        if trainer.is_world_process_zero():
            handle_metrics('''val''' , metrics , training_args.output_dir )
            all_metrics.update(metrics )

    if training_args.do_predict:
        logger.info('''*** Predict ***''' )
        test_output = trainer.predict(test_dataset=test_dataset , metric_key_prefix='''test''' )
        metrics = test_output.metrics
        metrics['''test_n_objs'''] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics['''test_loss'''] = round(metrics['''test_loss'''] , 4 )
            handle_metrics('''test''' , metrics , training_args.output_dir )
            all_metrics.update(metrics )

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=True , clean_up_tokenization_spaces=True )
                test_preds = lmap(str.strip , test_preds )
                write_txt_file(test_preds , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )

    if trainer.is_world_process_zero():
        save_json(all_metrics , os.path.join(training_args.output_dir , '''all_results.json''' ) )

    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
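# A hedged sketch of the JSON entry point that `parse_json_file` above supports: write
# the arguments to a file and pass that path as the single CLI argument. All values
# here are illustrative.
def _write_example_args(path="train_args.json"):
    import json

    args = {
        "model_name_or_path": "sshleifer/tiny-mbart",
        "data_dir": "./wmt_en_ro",
        "output_dir": "./out",
        "do_train": True,
        "n_train": 8,
    }
    with open(path, "w") as f:
        json.dump(args, f)
    return path  # then: python finetune_trainer.py train_args.json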
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}


class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250_112,
        d_model=512,
        d_kv=64,
        d_ff=1_024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
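# A short sketch exercising the config above; the checked values follow directly from
# the defaults in `__init__`.
def _demo_umt5_config():
    config = UMT5Config()
    assert config.hidden_size == config.d_model == 512
    assert config.num_attention_heads == 6 and config.num_hidden_layers == 8
    assert config.dense_act_fn == "gelu_new"  # "gated-gelu" is rewritten at the end of __init__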
import inspect
import unittest
from transformers import MobileNetV1Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileNetV1ForImageClassification, MobileNetV1Model
    from transformers.models.mobilenet_v1.modeling_mobilenet_v1 import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV1ImageProcessor


class MobileNetV1ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))


class MobileNetV1ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1_024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV1Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV1Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV1ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileNetV1ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MobileNetV1Model, MobileNetV1ForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetV1Model, "image-classification": MobileNetV1ForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV1ModelTester(self)
        self.config_tester = MobileNetV1ConfigTester(self, config_class=MobileNetV1Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV1Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetV1ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1e-4 ) )
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Tuple = {"""vocab_file""": """spiece.model"""}
__SCREAMING_SNAKE_CASE : int = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
}
}
__SCREAMING_SNAKE_CASE : int = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
__SCREAMING_SNAKE_CASE : Tuple = """▁"""
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Tuple = VOCAB_FILES_NAMES
__UpperCamelCase: List[Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase: int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Dict , A : Optional[int] , A : Any=True , A : List[Any]=True , A : Optional[Any]=False , A : Any="[CLS]" , A : List[str]="[SEP]" , A : str="<unk>" , A : Any="[SEP]" , A : int="<pad>" , A : Union[str, Any]="[CLS]" , A : List[str]="[MASK]" , A : Optional[Dict[str, Any]] = None , **A : int , ):
        # Mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
_UpperCAmelCase : Any = (
AddedToken(A , lstrip=A , rstrip=A , normalized=A )
if isinstance(A , A )
else mask_token
)
_UpperCAmelCase : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=A , remove_space=A , keep_accents=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
_UpperCAmelCase : List[Any] = do_lower_case
_UpperCAmelCase : str = remove_space
_UpperCAmelCase : Optional[Any] = keep_accents
_UpperCAmelCase : Optional[int] = vocab_file
_UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
@property
def _A ( self : str ):
return len(self.sp_model )
def _A ( self : Any ):
_UpperCAmelCase : Any = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ):
_UpperCAmelCase : int = self.__dict__.copy()
_UpperCAmelCase : List[str] = None
return state
def __setstate__( self : List[str] , A : int ):
_UpperCAmelCase : Dict = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_UpperCAmelCase : Optional[int] = {}
_UpperCAmelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _A ( self : List[Any] , A : Optional[int] ):
if self.remove_space:
_UpperCAmelCase : int = " ".join(inputs.strip().split() )
else:
_UpperCAmelCase : Tuple = inputs
_UpperCAmelCase : Optional[Any] = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
_UpperCAmelCase : Union[str, Any] = unicodedata.normalize("NFKD" , A )
_UpperCAmelCase : List[Any] = "".join([c for c in outputs if not unicodedata.combining(A )] )
if self.do_lower_case:
_UpperCAmelCase : int = outputs.lower()
return outputs
def _A ( self : str , A : str ):
_UpperCAmelCase : Union[str, Any] = self.preprocess_text(A )
_UpperCAmelCase : Optional[int] = self.sp_model.encode(A , out_type=A )
_UpperCAmelCase : int = []
for piece in pieces:
if len(A ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
_UpperCAmelCase : Dict = self.sp_model.EncodeAsPieces(piece[:-1].replace(A , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_UpperCAmelCase : Optional[Any] = cur_pieces[1:]
else:
_UpperCAmelCase : Dict = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(A )
else:
new_pieces.append(A )
return new_pieces
def _A ( self : Union[str, Any] , A : Optional[Any] ):
return self.sp_model.PieceToId(A )
def _A ( self : int , A : Optional[Any] ):
return self.sp_model.IdToPiece(A )
def _A ( self : int , A : Tuple ):
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : List[Any] = ""
_UpperCAmelCase : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(A ) + token
_UpperCAmelCase : Optional[int] = True
_UpperCAmelCase : List[str] = []
else:
current_sub_tokens.append(A )
_UpperCAmelCase : List[Any] = False
out_string += self.sp_model.decode(A )
return out_string.strip()
def _A ( self : List[Any] , A : List[int] , A : Optional[List[int]] = None ):
_UpperCAmelCase : List[str] = [self.sep_token_id]
_UpperCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
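        # Resulting layout (standard ALBERT convention): `[CLS] A [SEP]` for a single
        # sequence and `[CLS] A [SEP] B [SEP]` for a pair of sequences.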
def _A ( self : Tuple , A : List[int] , A : Optional[List[int]] = None , A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
if token_ids_a is not None:
return [1] + ([0] * len(A )) + [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1]
def _A ( self : int , A : List[int] , A : Optional[List[int]] = None ):
_UpperCAmelCase : Tuple = [self.sep_token_id]
_UpperCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _A ( self : Union[str, Any] , A : str , A : Optional[str] = None ):
if not os.path.isdir(A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_UpperCAmelCase : Union[str, Any] = os.path.join(
A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , "wb" ) as fi:
_UpperCAmelCase : int = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
def __init__( self : Any , A : Optional[int]=None , A : Tuple=None , *A : Tuple , **A : List[str] ):
super().__init__(*A , **A )
if config is None:
assert isinstance(self.model , A ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F""" {self.model.__class__}"""
)
_UpperCAmelCase : str = self.model.config
else:
_UpperCAmelCase : List[str] = config
_UpperCAmelCase : List[Any] = data_args
_UpperCAmelCase : str = self.config.tgt_vocab_size if isinstance(self.config , A ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
" padding.." )
if self.args.label_smoothing == 0:
_UpperCAmelCase : Optional[Any] = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_UpperCAmelCase : Dict = label_smoothed_nll_loss
def _A ( self : Tuple , A : int ):
if self.optimizer is None:
_UpperCAmelCase : Tuple = ["bias", "LayerNorm.weight"]
_UpperCAmelCase : str = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"weight_decay": 0.0,
},
]
_UpperCAmelCase : int = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
_UpperCAmelCase : List[str] = Adafactor
_UpperCAmelCase : List[Any] = {"scale_parameter": False, "relative_step": False}
else:
_UpperCAmelCase : List[str] = AdamW
_UpperCAmelCase : List[str] = {
"betas": (self.args.adam_betaa, self.args.adam_betaa),
"eps": self.args.adam_epsilon,
}
_UpperCAmelCase : List[Any] = self.args.learning_rate
if self.sharded_ddp:
_UpperCAmelCase : List[Any] = OSS(
params=A , optim=A , **A , )
else:
_UpperCAmelCase : Union[str, Any] = optimizer_cls(A , **A )
if self.lr_scheduler is None:
_UpperCAmelCase : List[str] = self._get_lr_scheduler(A )
else: # ignoring --lr_scheduler
logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored." )
def _A ( self : List[str] , A : Optional[int] ):
_UpperCAmelCase : List[str] = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_UpperCAmelCase : Optional[Any] = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
_UpperCAmelCase : str = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
_UpperCAmelCase : str = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=A )
return scheduler
def _A ( self : Tuple ):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def _A ( self : Any , A : Union[str, Any] , A : Union[str, Any] , A : List[Any] ):
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_UpperCAmelCase : List[str] = model(**A , use_cache=A )[0]
_UpperCAmelCase : int = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
_UpperCAmelCase , _UpperCAmelCase : Any = model(**A , labels=A , use_cache=A )[:2]
else:
# compute label smoothed loss
_UpperCAmelCase : Optional[int] = model(**A , use_cache=A )[0]
_UpperCAmelCase : List[str] = torch.nn.functional.log_softmax(A , dim=-1 )
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = self.loss_fn(A , A , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def _A ( self : List[str] , A : Optional[int] , A : Optional[int] ):
_UpperCAmelCase : Union[str, Any] = inputs.pop("labels" )
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self._compute_loss(A , A , A )
return loss
def _A ( self : List[str] , A : nn.Module , A : Dict[str, Union[torch.Tensor, Any]] , A : bool , A : Optional[List[str]] = None , ):
_UpperCAmelCase : List[str] = self._prepare_inputs(A )
_UpperCAmelCase : Dict = {
"max_length": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_UpperCAmelCase : Dict = self.model.generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , **A , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_UpperCAmelCase : int = self._pad_tensors_to_max_len(A , gen_kwargs["max_length"] )
_UpperCAmelCase : Any = inputs.pop("labels" )
with torch.no_grad():
# compute loss on predict data
_UpperCAmelCase , _UpperCAmelCase : str = self._compute_loss(A , A , A )
_UpperCAmelCase : List[str] = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_UpperCAmelCase : str = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_UpperCAmelCase : Optional[Any] = self._pad_tensors_to_max_len(A , gen_kwargs["max_length"] )
return (loss, logits, labels)
def _A ( self : Dict , A : int , A : List[str] ):
        # If the PAD token is not defined, at least the EOS token has to be defined
_UpperCAmelCase : Union[str, Any] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
F""" padded to `max_length`={max_length}""" )
_UpperCAmelCase : Tuple = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
_UpperCAmelCase : Tuple = tensor
return padded_tensor
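# Worked example for the padding helper above (shapes are illustrative only): a logits
# tensor of shape (2, 5) padded to max_length=8 gives padded_tensor[:, :5] == tensor,
# with padded_tensor[:, 5:] filled with pad_token_id.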
'''simple docstring'''
import datasets
from .evaluate import evaluate
__A : Any = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100,000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
__A : int = '\nThis metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
__A : str = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
def a__ ( self :int ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) ,codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] ,reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] ,)
def a__ ( self :List[str] ,_UpperCamelCase :Tuple ,_UpperCamelCase :Optional[Any] ):
snake_case_ : List[str] = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
snake_case_ : Union[str, Any] = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
snake_case_ : Optional[int] = evaluate(dataset=_UpperCamelCase ,predictions=_UpperCamelCase )
return score
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def UpperCAmelCase ( lowerCamelCase_ :str ):
'''simple docstring'''
snake_case_ : Any = tmp_path / """file.csv"""
snake_case_ : Any = textwrap.dedent(
"""\
header1,header2
1,2
10,20
""" )
with open(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ )
return str(lowerCamelCase_ )
@pytest.fixture
def UpperCAmelCase ( lowerCamelCase_ :str ):
'''simple docstring'''
snake_case_ : Optional[int] = tmp_path / """malformed_file.csv"""
snake_case_ : int = textwrap.dedent(
"""\
header1,header2
1,2
10,20,
""" )
with open(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ )
return str(lowerCamelCase_ )
@pytest.fixture
def UpperCAmelCase ( lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :int ):
'''simple docstring'''
snake_case_ : str = tmp_path / """csv_with_image.csv"""
snake_case_ : int = textwrap.dedent(
F'''\
image
{image_file}
''' )
with open(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ )
return str(lowerCamelCase_ )
@pytest.fixture
def UpperCAmelCase ( lowerCamelCase_ :Any ):
'''simple docstring'''
snake_case_ : int = tmp_path / """csv_with_label.csv"""
snake_case_ : Tuple = textwrap.dedent(
"""\
label
good
bad
good
""" )
with open(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ )
return str(lowerCamelCase_ )
@pytest.fixture
def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] ):
'''simple docstring'''
snake_case_ : List[str] = tmp_path / """csv_with_int_list.csv"""
snake_case_ : str = textwrap.dedent(
"""\
int_list
1 2 3
4 5 6
7 8 9
""" )
with open(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ )
return str(lowerCamelCase_ )
def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int , lowerCamelCase_ :Tuple ):
'''simple docstring'''
snake_case_ : int = Csv()
snake_case_ : Optional[Any] = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(lowerCamelCase_ , match="""Error tokenizing data""" ):
for _ in generator:
pass
assert any(
record.levelname == """ERROR"""
and """Failed to read file""" in record.message
and os.path.basename(lowerCamelCase_ ) in record.message
for record in caplog.records )
@require_pil
def UpperCAmelCase ( lowerCamelCase_ :Tuple ):
'''simple docstring'''
with open(lowerCamelCase_ , encoding="""utf-8""" ) as f:
snake_case_ : Tuple = f.read().splitlines()[1]
snake_case_ : str = Csv(encoding="""utf-8""" , features=Features({"""image""": Image()} ) )
snake_case_ : Tuple = csv._generate_tables([[csv_file_with_image]] )
snake_case_ : Optional[Any] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("""image""" ).type == Image()()
snake_case_ : List[str] = pa_table.to_pydict()["""image"""]
assert generated_content == [{"path": image_file, "bytes": None}]
def UpperCAmelCase ( lowerCamelCase_ :int ):
'''simple docstring'''
with open(lowerCamelCase_ , encoding="""utf-8""" ) as f:
snake_case_ : List[Any] = f.read().splitlines()[1:]
snake_case_ : Union[str, Any] = Csv(encoding="""utf-8""" , features=Features({"""label""": ClassLabel(names=["""good""", """bad"""] )} ) )
snake_case_ : Optional[Any] = csv._generate_tables([[csv_file_with_label]] )
snake_case_ : Optional[int] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("""label""" ).type == ClassLabel(names=["""good""", """bad"""] )()
snake_case_ : Union[str, Any] = pa_table.to_pydict()["""label"""]
    assert generated_content == [ClassLabel(names=["""good""", """bad"""] ).str2int(label ) for label in labels]
def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] ):
'''simple docstring'''
    snake_case_ : str = Csv(encoding="""utf-8""" , sep=""",""" , converters={"""int_list""": lambda x : [int(i ) for i in x.split()]} )
snake_case_ : Optional[Any] = csv._generate_tables([[csv_file_with_int_list]] )
snake_case_ : Tuple = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field("""int_list""" ).type )
snake_case_ : Dict = pa_table.to_pydict()["""int_list"""]
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__:List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:List[Any] = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
SCREAMING_SNAKE_CASE__:List[str] = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def _lowerCamelCase( a , a , a , a , a ):
for attribute in key.split("." ):
__a = getattr(a , a )
if weight_type is not None:
__a = getattr(a , a ).shape
else:
__a = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
__a = value
elif weight_type == "weight_g":
__a = value
elif weight_type == "weight_v":
__a = value
elif weight_type == "bias":
__a = value
else:
__a = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def _lowerCamelCase( a , a ):
__a = []
__a = fairseq_model.state_dict()
__a = hf_model.feature_extractor
for name, value in fairseq_dict.items():
__a = False
if "conv_layers" in name:
load_conv_layer(
a , a , a , a , hf_model.config.feat_extract_norm == "group" , )
__a = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
__a = True
if "*" in mapped_key:
__a = name.split(a )[0].split("." )[-2]
__a = mapped_key.replace("*" , a )
if "weight_g" in name:
__a = "weight_g"
elif "weight_v" in name:
__a = "weight_v"
elif "bias" in name and "relative_attention_bias" not in name:
__a = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__a = "weight"
else:
__a = None
set_recursively(a , a , a , a , a )
continue
if not is_used:
unused_weights.append(a )
logger.warning(F"Unused weights: {unused_weights}" )
def _lowerCamelCase( a , a , a , a , a ):
__a = full_name.split("conv_layers." )[-1]
__a = name.split("." )
__a = int(items[0] )
__a = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
__a = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
__a = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
__a = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
__a = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(a )
@torch.no_grad()
def _lowerCamelCase( a , a , a=None ):
# load the pre-trained checkpoints
__a = torch.load(a )
__a = WavLMConfigOrig(checkpoint["cfg"] )
__a = WavLMOrig(a )
model.load_state_dict(checkpoint["model"] )
model.eval()
if config_path is not None:
__a = WavLMConfig.from_pretrained(a )
else:
__a = WavLMConfig()
__a = WavLMModel(a )
recursively_load_weights(a , a )
hf_wavlm.save_pretrained(a )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Optional[int] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
SCREAMING_SNAKE_CASE__:Any = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
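# Example invocation (hypothetical script name and paths; requires the unilm checkout
# described in the steps at the top of this file):
#   python convert_wavlm_checkpoint.py --checkpoint_path ./WavLM-Base.pt --pytorch_dump_folder_path ./wavlm-base-hf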
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily keeps working
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : List[str] = logging.get_logger(__name__)
class UpperCamelCase_ ( UpperCamelCase):
"""simple docstring"""
snake_case__ : List[str] = "timm_backbone"
def __init__( self : List[Any] , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : str=None , **UpperCAmelCase__ : Union[str, Any] , ) -> List[Any]:
super().__init__(**UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = backbone
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = features_only
__SCREAMING_SNAKE_CASE = use_pretrained_backbone
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = out_indices if out_indices is not None else (-1,)
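# Usage sketch (argument values are illustrative assumptions, not defaults from the source):
# config = UpperCamelCase_(backbone="resnet50", num_channels=3, out_indices=(1, 2, 3, 4))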
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
a__ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase_ ( UpperCamelCase):
"""simple docstring"""
def __init__( self : List[str] , UpperCAmelCase__ : WhisperForConditionalGeneration , UpperCAmelCase__ : WhisperProcessor , UpperCAmelCase__ : AutoencoderKL , UpperCAmelCase__ : CLIPTextModel , UpperCAmelCase__ : CLIPTokenizer , UpperCAmelCase__ : UNetaDConditionModel , UpperCAmelCase__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCAmelCase__ : StableDiffusionSafetyChecker , UpperCAmelCase__ : CLIPImageProcessor , ) -> Optional[int]:
super().__init__()
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
speech_model=UpperCAmelCase__ , speech_processor=UpperCAmelCase__ , vae=UpperCAmelCase__ , text_encoder=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ , )
def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Optional[Union[str, int]] = "auto" ) -> str:
if slice_size == "auto":
__SCREAMING_SNAKE_CASE = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ) -> List[Any]:
self.enable_attention_slicing(UpperCAmelCase__ )
@torch.no_grad()
def __call__( self : str , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str=1_6_0_0_0 , UpperCAmelCase__ : int = 5_1_2 , UpperCAmelCase__ : int = 5_1_2 , UpperCAmelCase__ : int = 5_0 , UpperCAmelCase__ : float = 7.5 , UpperCAmelCase__ : Optional[Union[str, List[str]]] = None , UpperCAmelCase__ : Optional[int] = 1 , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : Optional[torch.Generator] = None , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase__ : int = 1 , **UpperCAmelCase__ : Dict , ) -> Any:
__SCREAMING_SNAKE_CASE = self.speech_processor.feature_extractor(
UpperCAmelCase__ , return_tensors="pt" , sampling_rate=UpperCAmelCase__ ).input_features.to(self.device )
__SCREAMING_SNAKE_CASE = self.speech_model.generate(UpperCAmelCase__ , max_length=4_8_0_0_0_0 )
__SCREAMING_SNAKE_CASE = self.speech_processor.tokenizer.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ , normalize=UpperCAmelCase__ )[
0
]
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = 1
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(UpperCAmelCase__ )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(UpperCAmelCase__ )}.""" )
# get prompt text embeddings
__SCREAMING_SNAKE_CASE = self.tokenizer(
UpperCAmelCase__ , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
__SCREAMING_SNAKE_CASE = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__SCREAMING_SNAKE_CASE = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
__SCREAMING_SNAKE_CASE = text_input_ids[:, : self.tokenizer.model_max_length]
__SCREAMING_SNAKE_CASE = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = text_embeddings.shape
__SCREAMING_SNAKE_CASE = text_embeddings.repeat(1 , UpperCAmelCase__ , 1 )
__SCREAMING_SNAKE_CASE = text_embeddings.view(bs_embed * num_images_per_prompt , UpperCAmelCase__ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
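        # Concretely, the guidance step further below computes
        #   noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
        # so e.g. `guidance_scale = 7.5` weights the text-conditioned direction 7.5x.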
__SCREAMING_SNAKE_CASE = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__SCREAMING_SNAKE_CASE = 42
if negative_prompt is None:
__SCREAMING_SNAKE_CASE = [""] * batch_size
elif type(UpperCAmelCase__ ) is not type(UpperCAmelCase__ ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(UpperCAmelCase__ )} !="""
F""" {type(UpperCAmelCase__ )}.""" )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = [negative_prompt]
elif batch_size != len(UpperCAmelCase__ ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(UpperCAmelCase__ )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
" the batch size of `prompt`." )
else:
__SCREAMING_SNAKE_CASE = negative_prompt
__SCREAMING_SNAKE_CASE = text_input_ids.shape[-1]
__SCREAMING_SNAKE_CASE = self.tokenizer(
UpperCAmelCase__ , padding="max_length" , max_length=UpperCAmelCase__ , truncation=UpperCAmelCase__ , return_tensors="pt" , )
__SCREAMING_SNAKE_CASE = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__SCREAMING_SNAKE_CASE = uncond_embeddings.shape[1]
__SCREAMING_SNAKE_CASE = uncond_embeddings.repeat(1 , UpperCAmelCase__ , 1 )
__SCREAMING_SNAKE_CASE = uncond_embeddings.view(batch_size * num_images_per_prompt , UpperCAmelCase__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__SCREAMING_SNAKE_CASE = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__SCREAMING_SNAKE_CASE = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__SCREAMING_SNAKE_CASE = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__SCREAMING_SNAKE_CASE = torch.randn(UpperCAmelCase__ , generator=UpperCAmelCase__ , device="cpu" , dtype=UpperCAmelCase__ ).to(
self.device )
else:
__SCREAMING_SNAKE_CASE = torch.randn(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=self.device , dtype=UpperCAmelCase__ )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
__SCREAMING_SNAKE_CASE = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(UpperCAmelCase__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__SCREAMING_SNAKE_CASE = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__SCREAMING_SNAKE_CASE = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
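        # Note: eta = 0.0 makes DDIM sampling deterministic, while eta = 1.0
        # recovers DDPM-like stochastic sampling.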
__SCREAMING_SNAKE_CASE = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__SCREAMING_SNAKE_CASE = {}
if accepts_eta:
__SCREAMING_SNAKE_CASE = eta
for i, t in enumerate(self.progress_bar(UpperCAmelCase__ ) ):
# expand the latents if we are doing classifier free guidance
__SCREAMING_SNAKE_CASE = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__SCREAMING_SNAKE_CASE = self.scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
# predict the noise residual
__SCREAMING_SNAKE_CASE = self.unet(UpperCAmelCase__ , UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ ).sample
# perform guidance
if do_classifier_free_guidance:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = noise_pred.chunk(2 )
__SCREAMING_SNAKE_CASE = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__SCREAMING_SNAKE_CASE = self.scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = 1 / 0.18_215 * latents
__SCREAMING_SNAKE_CASE = self.vae.decode(UpperCAmelCase__ ).sample
__SCREAMING_SNAKE_CASE = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__SCREAMING_SNAKE_CASE = self.numpy_to_pil(UpperCAmelCase__ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=UpperCAmelCase__ , nsfw_content_detected=UpperCAmelCase__ )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__UpperCAmelCase = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['DeiTFeatureExtractor']
__UpperCAmelCase = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :str = "bert-generation"
def __init__( self , __A=5_0358 , __A=1024 , __A=24 , __A=16 , __A=4096 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=0.0_2 , __A=1E-12 , __A=0 , __A=2 , __A=1 , __A="absolute" , __A=True , **__A , ) -> Tuple:
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
lowerCAmelCase_ :Any = vocab_size
lowerCAmelCase_ :List[Any] = hidden_size
lowerCAmelCase_ :Optional[int] = num_hidden_layers
lowerCAmelCase_ :int = num_attention_heads
lowerCAmelCase_ :List[Any] = hidden_act
lowerCAmelCase_ :Optional[Any] = intermediate_size
lowerCAmelCase_ :List[Any] = hidden_dropout_prob
lowerCAmelCase_ :int = attention_probs_dropout_prob
lowerCAmelCase_ :Tuple = max_position_embeddings
lowerCAmelCase_ :List[str] = initializer_range
lowerCAmelCase_ :Union[str, Any] = layer_norm_eps
lowerCAmelCase_ :List[str] = position_embedding_type
lowerCAmelCase_ :Optional[int] = use_cache
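# Usage sketch (class name as obfuscated above; argument values are illustrative only):
# config = _SCREAMING_SNAKE_CASE(vocab_size=50358, hidden_size=1024, num_hidden_layers=24)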
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def wrapper(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
UpperCamelCase = timeit.default_timer()
UpperCamelCase = func(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
UpperCamelCase = timeit.default_timer() - starttime
return delta
UpperCamelCase = func.__name__
return wrapper
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=100 , _SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = seq_shapes or {}
for i in range(_SCREAMING_SNAKE_CASE ):
UpperCamelCase = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(_SCREAMING_SNAKE_CASE , _ArrayXD ):
UpperCamelCase = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(_SCREAMING_SNAKE_CASE , datasets.Value ):
if v.dtype == "string":
UpperCamelCase = "The small grey turtle was surprisingly fast when challenged."
else:
UpperCamelCase = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(_SCREAMING_SNAKE_CASE , datasets.Sequence ):
while isinstance(_SCREAMING_SNAKE_CASE , datasets.Sequence ):
UpperCamelCase = v.feature
UpperCamelCase = seq_shapes[k]
UpperCamelCase = np.random.rand(*_SCREAMING_SNAKE_CASE ).astype(v.dtype )
UpperCamelCase = data
dummy_data.append((i, example) )
return dummy_data
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=100 , _SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
UpperCamelCase = generate_examples(_SCREAMING_SNAKE_CASE , num_examples=_SCREAMING_SNAKE_CASE , seq_shapes=_SCREAMING_SNAKE_CASE )
with ArrowWriter(features=_SCREAMING_SNAKE_CASE , path=_SCREAMING_SNAKE_CASE ) as writer:
for key, record in dummy_data:
UpperCamelCase = features.encode_example(_SCREAMING_SNAKE_CASE )
writer.write(_SCREAMING_SNAKE_CASE )
UpperCamelCase , UpperCamelCase = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
F"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )
UpperCamelCase = datasets.Dataset.from_file(filename=_SCREAMING_SNAKE_CASE , info=datasets.DatasetInfo(features=_SCREAMING_SNAKE_CASE ) )
return dataset
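# Sketch of how these helpers compose (feature spec and original function name are
# illustrative assumptions; the three functions above are all renamed `a__` by the
# obfuscation — originally a timer decorator, an example generator, and a dataset writer):
# features = datasets.Features({"text": datasets.Value("string"), "label": datasets.Value("int32")})
# dataset = generate_example_dataset("bench.arrow", features, num_examples=100)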
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
lowerCAmelCase__ = NewType('''DataClass''', Any)
lowerCAmelCase__ = NewType('''DataClassType''', Any)
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
F"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)." )
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = {str(_SCREAMING_SNAKE_CASE ): choice for choice in choices}
return lambda _SCREAMING_SNAKE_CASE : str_to_choice.get(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def a__ ( *,
_SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = dataclasses.MISSING , _SCREAMING_SNAKE_CASE = dataclasses.MISSING , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
UpperCamelCase = {}
if aliases is not None:
UpperCamelCase = aliases
if help is not None:
UpperCamelCase = help
return dataclasses.field(metadata=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , default_factory=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
class _lowerCamelCase ( _lowercase ):
UpperCAmelCase_ = 42
def __init__(self , __a , **__a ) -> Any:
# To make the default appear when using --help
if "formatter_class" not in kwargs:
UpperCamelCase = ArgumentDefaultsHelpFormatter
super().__init__(**__a )
if dataclasses.is_dataclass(__a ):
UpperCamelCase = [dataclass_types]
UpperCamelCase = list(__a )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(__a )
@staticmethod
def snake_case_ (__a , __a ) -> Optional[Any]:
UpperCamelCase = F"--{field.name}"
UpperCamelCase = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , __a ):
raise RuntimeError(
"Unresolved type detected, which should have been done with the help of "
"`typing.get_type_hints` method by default" )
UpperCamelCase = kwargs.pop("aliases" , [] )
if isinstance(__a , __a ):
UpperCamelCase = [aliases]
UpperCamelCase = getattr(field.type , "__origin__" , field.type )
if origin_type is Union or (hasattr(__a , "UnionType" ) and isinstance(__a , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(__a ) not in field.type.__args__
):
raise ValueError(
"Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
" the argument parser only supports one type per argument."
F" Problem encountered in field '{field.name}'." )
if type(__a ) not in field.type.__args__:
# filter `str` in Union
UpperCamelCase = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
UpperCamelCase = getattr(field.type , "__origin__" , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
UpperCamelCase = (
field.type.__args__[0] if isinstance(__a , field.type.__args__[1] ) else field.type.__args__[1]
)
UpperCamelCase = getattr(field.type , "__origin__" , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
UpperCamelCase = {}
if origin_type is Literal or (isinstance(field.type , __a ) and issubclass(field.type , __a )):
if origin_type is Literal:
UpperCamelCase = field.type.__args__
else:
UpperCamelCase = [x.value for x in field.type]
UpperCamelCase = make_choice_type_function(kwargs["choices"] )
if field.default is not dataclasses.MISSING:
UpperCamelCase = field.default
else:
UpperCamelCase = True
elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs so we can instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
UpperCamelCase = copy(__a )
# Hack because type=bool in argparse does not behave as we want.
UpperCamelCase = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # The default value is False if no default is provided for a bool field.
UpperCamelCase = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
UpperCamelCase = default
# This tells argparse we accept 0 or 1 value after --field_name
UpperCamelCase = "?"
# This is the value that will get picked if we do --field_name (without value)
UpperCamelCase = True
elif isclass(__a ) and issubclass(__a , __a ):
UpperCamelCase = field.type.__args__[0]
UpperCamelCase = "+"
if field.default_factory is not dataclasses.MISSING:
UpperCamelCase = field.default_factory()
elif field.default is dataclasses.MISSING:
UpperCamelCase = True
else:
UpperCamelCase = field.type
if field.default is not dataclasses.MISSING:
UpperCamelCase = field.default
elif field.default_factory is not dataclasses.MISSING:
UpperCamelCase = field.default_factory()
else:
UpperCamelCase = True
parser.add_argument(__a , *__a , **__a )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(F"--no_{field.name}" , action="store_false" , dest=field.name , **bool_kwargs )
    def _add_dataclass_arguments(self , dtype ) -> None:
        if hasattr(dtype , "_argument_group_name" ):
            parser = self.add_argument_group(dtype._argument_group_name )
        else:
            parser = self
        try:
            type_hints = get_type_hints(dtype )
except NameError:
raise RuntimeError(
F"Type resolution failed for {dtype}. Try declaring the class in global scope or "
"removing line of `from __future__ import annotations` which opts in Postponed "
"Evaluation of Annotations (PEP 563)" )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex ):
                python_version = ".".join(map(str , sys.version_info[:3] ) )
                raise RuntimeError(
                    F"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "the line `from __future__ import annotations`, which opts into union types "
                    "written as `X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). "
                    "To support Python versions lower than 3.10, use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`." ) from ex
raise
        for field in dataclasses.fields(dtype ):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser , field )
    def parse_args_into_dataclasses(self , args=None , return_remaining_strings=False , look_for_args_file=True , args_filename=None , args_file_flag=None , ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
            args_files = []
            if args_filename:
                args_files.append(Path(args_filename ) )
            elif look_for_args_file and len(sys.argv ):
                args_files.append(Path(sys.argv[0] ).with_suffix(".args" ) )
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag , type=str , action="append" )
                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg , args = args_file_parser.parse_known_args(args=args )
                cmd_args_file_paths = vars(cfg ).get(args_file_flag.lstrip("-" ) , None )
                if cmd_args_file_paths:
                    args_files.extend([Path(p ) for p in cmd_args_file_paths] )
            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace , remaining_args = self.parse_known_args(args=args )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in vars(namespace ).items() if k in keys}
            for k in keys:
                delattr(namespace , k )
            obj = dtype(**inputs )
            outputs.append(obj )
        if len(namespace.__dict__ ) > 0:
            # additional namespace.
            outputs.append(namespace )
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(F"Some specified arguments are not used by the HfArgumentParser: {remaining_args}" )
            return (*outputs,)
    def parse_dict(self , args , allow_extra_keys = False ) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys() )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys() )
            obj = dtype(**inputs )
            outputs.append(obj )
        if not allow_extra_keys and unused_keys:
            raise ValueError(F"Some keys are not used by the HfArgumentParser: {sorted(unused_keys )}" )
        return tuple(outputs )
    def parse_json_file(self , json_file , allow_extra_keys = False ) -> Tuple[DataClass, ...]:
        with open(Path(json_file ) , encoding="utf-8" ) as open_json_file:
            data = json.loads(open_json_file.read() )
        outputs = self.parse_dict(data , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
    def parse_yaml_file(self , yaml_file , allow_extra_keys = False ) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file ).read_text() ) , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
| 244
| 0
|
def cocktail_shaker_sort( unsorted: list ) -> list:
    """simple docstring"""
    for i in range(len(unsorted ) - 1 , 0 , -1 ):
        swapped = False
        for j in range(i , 0 , -1 ):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j - 1], unsorted[j] = unsorted[j], unsorted[j - 1]
                swapped = True
        for j in range(i ):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j + 1], unsorted[j] = unsorted[j], unsorted[j + 1]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(F"""{cocktail_shaker_sort(unsorted) = }""")
| 227
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester ( unittest.TestCase ):
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=True , )
        return config, input_ids, attention_mask
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
    def setUp(self ):
        self.model_tester = FlaxDistilBertModelTester(self )
    @slow
    def test_model_from_pretrained(self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""distilbert-base-uncased""" )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class FlaxDistilBertModelIntegrationTest ( unittest.TestCase ):
    @slow
    def test_inference_no_head(self ):
        model = FlaxDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
| 206
| 0
|
"""simple docstring"""
def multiplication_table( number: int , number_of_terms: int ) -> str:
    return "\n".join(
        f"""{number} * {i} = {number * i}""" for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=1_0))
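# Example output (derived from the f-string above, shown here as a sketch):
# multiplication_table(number=2, number_of_terms=3) returns the string
# "2 * 1 = 2\n2 * 2 = 4\n2 * 3 = 6".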
| 212
|
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
_A = open # noqa: we just need to have a builtin inside this module to test it properly
| 212
| 1
|
import datasets
from .evaluate import evaluate
_CITATION = '''\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
'''
_DESCRIPTION = '''
This metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
'''
_KWARGS_DESCRIPTION = '''
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the SQuAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]
>>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]
>>> squad_metric = datasets.load_metric("squad")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Squad( datasets.Metric ):
    '''simple docstring'''
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {'''id''': datasets.Value('''string''' ), '''prediction_text''': datasets.Value('''string''' )},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , reference_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , )
    def _compute( self , predictions , references ):
        pred_dict = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
        dataset = [
{
'''paragraphs''': [
{
'''qas''': [
{
'''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
'''id''': ref['''id'''],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset , predictions=pred_dict )
return score
| 8
|
from ..utils import DummyObject, requires_backends
class MidiProcessor( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["note_seq"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['''note_seq'''] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['''note_seq'''] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['''note_seq'''] )
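# Usage sketch for the dummy-object pattern above (illustrative): when the
# `note_seq` backend is missing, any use of the class raises an informative
# ImportError from `requires_backends` instead of failing at import time.
#
#     try:
#         MidiProcessor()
#     except ImportError as err:
#         print(err)  # explains that `note_seq` must be installed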
| 8
| 1
|
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def base64_encode( data: bytes ) -> bytes:
    """simple docstring"""
    if not isinstance(data , bytes ):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg )
    binary_stream = "".join(bin(byte )[2:].zfill(8 ) for byte in data )
    padding_needed = len(binary_stream ) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream ) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream ) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
            for index in range(0 , len(binary_stream ) , 6 ) ).encode()
        + padding
    )
def base64_decode( encoded_data: str ) -> bytes:
    """simple docstring"""
    if not isinstance(encoded_data , bytes ) and not isinstance(encoded_data , str ):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg )
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data , bytes ):
        try:
            encoded_data = encoded_data.decode("utf-8" )
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters" )
    padding = encoded_data.count("=" )
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data ) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )
    data = [
        int(binary_stream[index : index + 8] , 2 )
        for index in range(0 , len(binary_stream ) , 8 )
    ]
    return bytes(data )
if __name__ == "__main__":
import doctest
doctest.testmod()
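# Cross-check sketch against the standard library (an addition for illustration,
# not part of the original module): both directions should agree with `base64`.
if __name__ == "__main__":
    import base64 as stdlib_base64

    sample = b"Hello, World!"
    assert base64_encode(sample) == stdlib_base64.b64encode(sample)
    assert base64_decode(stdlib_base64.b64encode(sample)) == sample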
| 351
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , projection_dim=0 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        config = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFDPRContextEncoder(config=config )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
    def create_and_check_dpr_question_encoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFDPRQuestionEncoder(config=config )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
    def create_and_check_dpr_reader( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFDPRReader(config=config )
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest (TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = TFDPRModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DPRConfig , hidden_size=37 )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_dpr_context_encoder_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs )
    def test_dpr_question_encoder_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs )
    def test_dpr_reader_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class TFDPRModelIntegrationTest (unittest.TestCase ):
    @slow
    def test_dpr_inference( self ):
        '''simple docstring'''
        model = TFDPRQuestionEncoder.from_pretrained("""facebook/dpr-question_encoder-single-nq-base""" )
        input_ids = tf.constant(
            [[101, 7_592, 1_010, 2_003, 2_026, 3_899, 10_140, 1_029, 102]] )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids )[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
0.03_23_62_53,
0.12_75_33_35,
0.16_81_85_09,
0.00_27_97_86,
0.3_89_69_33,
0.24_26_49_45,
0.2_17_89_71,
-0.02_33_52_27,
-0.08_48_19_59,
-0.14_32_41_17,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 125
| 0
|
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''',
'''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''',
}
class EncodecConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """encodec"""
    def __init__( self , target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0] , sampling_rate=2_4000 , audio_channels=1 , normalize=False , chunk_length_s=None , overlap=None , hidden_size=128 , num_filters=32 , num_residual_layers=1 , upsampling_ratios=[8, 5, 4, 2] , norm_type="weight_norm" , kernel_size=7 , last_kernel_size=7 , residual_kernel_size=3 , dilation_growth_rate=2 , use_causal_conv=True , pad_mode="reflect" , compress=2 , num_lstm_layers=2 , trim_right_ratio=1.0 , codebook_size=1024 , codebook_dim=None , use_conv_shortcut=True , **kwargs , ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                F'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}''' )
        super().__init__(**kwargs )
    @property
    def chunk_length( self ):
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate )
    @property
    def chunk_stride( self ):
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
    @property
    def frame_rate( self ):
        hop_length = np.prod(self.upsampling_ratios )
        return math.ceil(self.sampling_rate / hop_length )
    @property
    def num_quantizers( self ):
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
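# Worked example of the derived properties for the 24 kHz defaults above:
# hop_length = prod([8, 5, 4, 2]) = 320, so frame_rate = ceil(24_000 / 320) = 75,
# and num_quantizers = int(1000 * 24.0 // (75 * 10)) = 32.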
| 195
|
def mf_knapsack(i , wt , val , j ):
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val_ = mf_knapsack(i - 1 , wt , val , j )
        else:
            val_ = max(
                mf_knapsack(i - 1 , wt , val , j ) , mf_knapsack(i - 1 , wt , val , j - wt[i - 1] ) + val[i - 1] , )
        f[i][j] = val_
    return f[i][j]
def knapsack(w , wt , val , n ):
    dp = [[0] * (w + 1) for _ in range(n + 1 )]
    for i in range(1 , n + 1 ):
        for w_ in range(1 , w + 1 ):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w], dp
def knapsack_with_example_solution(w , wt , val ):
    if not (isinstance(wt , (list, tuple) ) and isinstance(val , (list, tuple) )):
        raise ValueError(
            'Both the weights and values vectors must be either lists or tuples' )
    num_items = len(wt )
    if num_items != len(val ):
        msg = (
            'The number of weights must be the same as the number of values.\n'
            F'''But got {num_items} weights and {len(val )} values'''
        )
        raise ValueError(msg )
    for i in range(num_items ):
        if not isinstance(wt[i] , int ):
            msg = (
                'All weights must be integers but got weight of '
                F'''type {type(wt[i] )} at index {i}'''
            )
            raise TypeError(msg )
    optimal_val , dp_table = knapsack(w , wt , val , num_items )
    example_optional_set: set = set()
    _construct_solution(dp_table , wt , num_items , w , example_optional_set )
    return optimal_val, example_optional_set
def _construct_solution(dp , wt , i , j , optimal_set ):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp , wt , i - 1 , j , optimal_set )
        else:
            optimal_set.add(i )
            _construct_solution(dp , wt , i - 1 , j - wt[i - 1] , optimal_set )
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution , _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w
    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution , optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print('''optimal_value = ''', optimal_solution)
    print('''An optimal subset corresponding to the optimal value''', optimal_subset)
| 195
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest( unittest.TestCase ):
    def setUp( self ):
        """simple docstring"""
        self.checkpoint = '''ZinengTang/tvlt-base'''
        self.tmpdirname = tempfile.mkdtemp()
    def get_image_processor( self, **kwargs ):
        """simple docstring"""
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs )
    def get_feature_extractor( self, **kwargs ):
        """simple docstring"""
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs )
    def tearDown( self ):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = TvltProcessor.from_pretrained(self.tmpdirname )
        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor )
        self.assertIsInstance(processor.image_processor, TvltImageProcessor )
    def test_feature_extractor( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor )
        audio = np.ones([1_2000] )
        audio_dict = feature_extractor(audio, return_tensors='''np''' )
        input_processor = processor(audio=audio, return_tensors='''np''' )
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1E-2 )
    def test_image_processor( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor )
        images = np.ones([3, 224, 224] )
        image_dict = image_processor(images, return_tensors='''np''' )
        input_processor = processor(images=images, return_tensors='''np''' )
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1E-2 )
    def test_processor( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor )
        audio = np.ones([1_2000] )
        images = np.ones([3, 224, 224] )
        inputs = processor(audio=audio, images=images )
        self.assertListEqual(list(inputs.keys() ), ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_model_input_names( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor )
        self.assertListEqual(
            processor.model_input_names, image_processor.model_input_names + feature_extractor.model_input_names, msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''', )
| 167
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand ( ABC ):
    @staticmethod
    @abstractmethod
    def register_subcommand( parser ):
        """simple docstring"""
        raise NotImplementedError()
    @abstractmethod
    def run( self ):
        """simple docstring"""
        raise NotImplementedError()
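# A sketch of a concrete command built on the ABC above. `EchoCommand` and its
# argparse wiring are invented here for illustration; only the
# `register_subcommand`/`run` contract comes from the class itself.
class EchoCommand(BaseTransformersCLICommand ):
    @staticmethod
    def register_subcommand(parser ):
        # register an `echo` subcommand on an argparse subparsers object
        echo = parser.add_parser("echo" )
        echo.add_argument("text" )
        echo.set_defaults(func=lambda args: EchoCommand(args.text ) )
    def __init__(self , text ):
        self.text = text
    def run(self ):
        print(self.text )
if __name__ == "__main__":
    main_parser = ArgumentParser("demo" )
    EchoCommand.register_subcommand(main_parser.add_subparsers() )
    ns = main_parser.parse_args(["echo" , "hello"] )
    ns.func(ns ).run()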
| 167
| 1
|
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=1_3 , image_size=3_2 , patch_size=2 , num_channels=3 , embed_dim=1_6 , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.02 , layer_norm_eps=1e-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=1_0 , encoder_stride=8 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = SwinvaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ):
        model = SwinvaForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class SwinvaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = SwinvaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=SwinvaConfig , embed_dim=3_7 )
    def test_config( self ):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    @unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
    def test_multi_gpu_data_parallel_forward( self ):
        pass
    @unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
        pass
    def test_model_common_attributes( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_attention_outputs( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths )
            self.assertEqual(len(attentions ) , expected_num_attentions )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.attentions
            self.assertEqual(len(attentions ) , expected_num_attentions )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
            out_len = len(outputs )
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            if hasattr(self.model_tester , '''num_hidden_states_types''' ):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states , len(outputs ) )
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions ) , expected_num_attentions )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
    def check_hidden_states_output( self , inputs_dict , config , model_class , image_size ):
        model = model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(hidden_states ) , expected_num_layers )
        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states ) , expected_num_layers )
        batch_size , num_channels , height , width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size , num_channels , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def test_hidden_states_output( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
    def test_hidden_states_output_with_padding( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
    def test_for_masked_image_modeling( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_initialization( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class SwinvaModelIntegrationTest( unittest.TestCase ):
    """simple docstring"""
    @cached_property
    def default_image_processor( self ):
        return (
            AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head( self ):
        model = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.39_47, -0.43_06, 0.00_26] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 30
|
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    '''simple docstring'''
    a , b = 0, 1
    while True:
        a , b = b, a + b
        yield b
def solution( n: int = 1_000 ) -> int:
    '''simple docstring'''
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen ) ) ) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
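# Worked example (follows directly from the code above): solution(3) == 12,
# because 144 is the twelfth Fibonacci number and the first with three digits.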
| 244
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""weiweishi/roc-bert-base-zh""": """https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json""",
}
class RoCBertConfig( PretrainedConfig ):
    model_type = 'roc_bert'
    def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_cache=True , pad_token_id=0 , position_embedding_type="absolute" , classifier_dropout=None , enable_pronunciation=True , enable_shape=True , pronunciation_embed_dim=7_6_8 , pronunciation_vocab_size=9_1_0 , shape_embed_dim=5_1_2 , shape_vocab_size=2_4_8_5_8 , concat_input=True , **kwargs , ) -> Tuple:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id , **kwargs )
| 359
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json"""
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig( PretrainedConfig ):
    model_type = 'fnet'
    def __init__( self , vocab_size=3_2_0_0_0 , hidden_size=7_6_8 , num_hidden_layers=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=4 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_tpu_fourier_optimizations=False , tpu_short_seq_length=5_1_2 , pad_token_id=3 , bos_token_id=1 , eos_token_id=2 , **kwargs , ) -> Dict:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
| 197
| 0
|
class Node:
    def __init__(self , val ):
        self.val = val
        self.left = None
        self.right = None
    def insert(self , val ):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val )
                else:
                    self.left.insert(val )
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val )
                else:
                    self.right.insert(val )
        else:
            self.val = val
def inorder(root , res ):
    # Recursive traversal
    if root:
        inorder(root.left , res )
        res.append(root.val )
        inorder(root.right , res )
def tree_sort(arr ):
    # Build BST
    if len(arr ) == 0:
        return arr
    root = Node(arr[0] )
    for i in range(1 , len(arr ) ):
        root.insert(arr[i] )
    # Traverse BST in order.
    res = []
    inorder(root , res )
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
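# Behavioural note (follows from `insert` above): equal keys overwrite the node
# rather than adding a new one, so duplicates collapse; e.g. tree_sort([3, 1, 3])
# returns [1, 3]. This tree sort therefore assumes distinct keys.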
| 212
|
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input( self ):
        '''simple docstring'''
        return self.get_dummy_input()
    @property
    def output_shape( self ):
        '''simple docstring'''
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
    def get_dummy_input( self , include_temb=True , include_res_hidden_states_tuple=False , include_encoder_hidden_states=False , include_skip_sample=False , ):
        '''simple docstring'''
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)
        generator = torch.manual_seed(0 )
        device = torch.device(torch_device )
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape , generator=generator , device=device )
        dummy_input = {'hidden_states': hidden_states}
        if include_temb:
            temb_channels = 128
            dummy_input['temb'] = randn_tensor((batch_size, temb_channels) , generator=generator , device=device )
        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1 )
            dummy_input['res_hidden_states_tuple'] = (randn_tensor(shape , generator=generator_1 , device=device ),)
        if include_encoder_hidden_states:
            dummy_input['encoder_hidden_states'] = floats_tensor((batch_size, 32, 32) ).to(device )
        if include_skip_sample:
            dummy_input['skip_sample'] = randn_tensor(((batch_size, 3) + sizes) , generator=generator , device=device )
        return dummy_input
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = {
'in_channels': 32,
'out_channels': 32,
'temb_channels': 128,
}
if self.block_type == "up":
lowerCAmelCase__ : Union[str, Any] = 32
if self.block_type == "mid":
init_dict.pop('out_channels' )
lowerCAmelCase__ : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def _lowerCamelCase ( self : str , a : List[str] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.prepare_init_args_and_inputs_for_common()
lowerCAmelCase__ : int = self.block_class(**a )
unet_block.to(a )
unet_block.eval()
with torch.no_grad():
lowerCAmelCase__ : int = unet_block(**a )
if isinstance(a , a ):
lowerCAmelCase__ : List[str] = output[0]
self.assertEqual(output.shape , self.output_shape )
lowerCAmelCase__ : List[str] = output[0, -1, -3:, -3:]
lowerCAmelCase__ : Any = torch.tensor(a ).to(a )
assert torch_all_close(output_slice.flatten() , a , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.prepare_init_args_and_inputs_for_common()
lowerCAmelCase__ : Any = self.block_class(**a )
model.to(a )
model.train()
lowerCAmelCase__ : int = model(**a )
if isinstance(a , a ):
lowerCAmelCase__ : Dict = output[0]
lowerCAmelCase__ : Optional[int] = torch.device(a )
lowerCAmelCase__ : List[Any] = randn_tensor(output.shape , device=a )
lowerCAmelCase__ : List[Any] = torch.nn.functional.mse_loss(a , a )
loss.backward()
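    # The training check above is only a smoke test: it verifies that a forward
    # pass, an MSE loss and a backward pass run without error on the block.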
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
UpperCamelCase__ = """</w>"""
UpperCamelCase__ = """@@ """
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, given as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
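# For example, get_pairs(("l", "o", "w", "</w>")) returns
# {("l", "o"), ("o", "w"), ("w", "</w>")}.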
# Speech2Text2 has no max input length
UpperCamelCase__ = {"""facebook/s2t-wav2vec2-large-en-de""": 1024}
class a__ ( PreTrainedTokenizer ):
_a : Optional[Any] = VOCAB_FILES_NAMES
_a : int = PRETRAINED_VOCAB_FILES_MAP
_a : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a : Union[str, Any] = ["""input_ids""", """attention_mask"""]
def __init__( self , _A , _A="<s>" , _A="<pad>" , _A="</s>" , _A="<unk>" , _A=False , _A=None , **_A , ):
"""simple docstring"""
super().__init__(
unk_token=_A , bos_token=_A , eos_token=_A , pad_token=_A , do_lower_case=_A , **_A , )
__lowerCAmelCase = do_lower_case
with open(_A , encoding="utf-8" ) as vocab_handle:
__lowerCAmelCase = json.load(_A )
__lowerCAmelCase = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(f"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
__lowerCAmelCase = None
__lowerCAmelCase = None
else:
with open(_A , encoding="utf-8" ) as merges_handle:
__lowerCAmelCase = merges_handle.read().split("\n" )[:-1]
__lowerCAmelCase = [tuple(merge.split()[:2] ) for merge in merges]
__lowerCAmelCase = dict(zip(_A , range(len(_A ) ) ) )
__lowerCAmelCase = {}
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return len(self.decoder )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
__lowerCAmelCase = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
__lowerCAmelCase = get_pairs(_A )
if not pairs:
return token
while True:
__lowerCAmelCase = min(_A , key=lambda _A : self.bpe_ranks.get(_A , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__lowerCAmelCase , __lowerCAmelCase = bigram
__lowerCAmelCase = []
__lowerCAmelCase = 0
while i < len(_A ):
try:
__lowerCAmelCase = word.index(_A , _A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__lowerCAmelCase = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowerCAmelCase = tuple(_A )
__lowerCAmelCase = new_word
if len(_A ) == 1:
break
else:
__lowerCAmelCase = get_pairs(_A )
__lowerCAmelCase = " ".join(_A )
if word == "\n " + BPE_TOKEN_MERGES:
__lowerCAmelCase = "\n" + BPE_TOKEN_MERGES
if word.endswith(_A ):
__lowerCAmelCase = word.replace(_A , "" )
__lowerCAmelCase = word.replace(" " , _A )
__lowerCAmelCase = word
return word
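    # The loop above repeatedly applies the lowest-ranked merge from merges.txt
    # until none applies, strips the trailing "</w>" end-of-word marker, and
    # re-inserts "@@ " continuation markers between the remaining sub-word units.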
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
if self.bpe_ranks is None:
raise ValueError(
"This tokenizer was instantiated without a `merges.txt` file, so"
" that it can only be used for decoding, not for encoding."
"Make sure to provide `merges.txt` file at instantiation to enable "
"encoding." )
if self.do_lower_case:
__lowerCAmelCase = text.lower()
__lowerCAmelCase = text.split()
__lowerCAmelCase = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(_A ).split(" " ) ) )
return split_tokens
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
return self.encoder.get(_A , self.encoder.get(self.unk_token ) )
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
__lowerCAmelCase = self.decoder.get(_A , self.unk_token )
return result
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
__lowerCAmelCase = " ".join(_A )
# make sure @@ tokens are concatenated
__lowerCAmelCase = "".join(string.split(_A ) )
return string
def __SCREAMING_SNAKE_CASE( self , _A , _A = None ):
"""simple docstring"""
if not os.path.isdir(_A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowerCAmelCase = os.path.join(
_A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__lowerCAmelCase = os.path.join(
_A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(_A , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_A , ensure_ascii=_A ) + "\n" )
__lowerCAmelCase = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(_A , "w" , encoding="utf-8" ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
__lowerCAmelCase = token_index
writer.write(" ".join(_A ) + "\n" )
index += 1
return (vocab_file, merges_file)
import enum
import shutil
import sys

TERMINAL_WIDTH, _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}

class Direction(enum.Enum):
    UP = 0
    DOWN = 1

def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()

def writeColor(content, color, end=""):
    forceWrite(f"""\u001b[{color}m{content}\u001b[0m""", end)

def reset_cursor():
    forceWrite("\r")

def move_cursor(num_lines, direction):
    forceWrite(f"""\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}""")

def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()

def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)
_a = {
"Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class __lowerCamelCase ( PretrainedConfig ):
"""simple docstring"""
UpperCamelCase__ = "dpt"
def __init__( self , UpperCAmelCase=768 , UpperCAmelCase=12 , UpperCAmelCase=12 , UpperCAmelCase=3072 , UpperCAmelCase="gelu" , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-12 , UpperCAmelCase=384 , UpperCAmelCase=16 , UpperCAmelCase=3 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=[2, 5, 8, 11] , UpperCAmelCase="project" , UpperCAmelCase=[4, 2, 1, 0.5] , UpperCAmelCase=[96, 192, 384, 768] , UpperCAmelCase=256 , UpperCAmelCase=-1 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=0.4 , UpperCAmelCase=255 , UpperCAmelCase=0.1 , UpperCAmelCase=[1, 1024, 24, 24] , UpperCAmelCase=[0, 1] , UpperCAmelCase=None , **UpperCAmelCase , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase )
_UpperCAmelCase = hidden_size
_UpperCAmelCase = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('Initializing the config with a `BiT` backbone.' )
_UpperCAmelCase = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
}
_UpperCAmelCase = BitConfig(**UpperCAmelCase )
elif isinstance(UpperCAmelCase , UpperCAmelCase ):
logger.info('Initializing the config with a `BiT` backbone.' )
_UpperCAmelCase = BitConfig(**UpperCAmelCase )
elif isinstance(UpperCAmelCase , UpperCAmelCase ):
_UpperCAmelCase = backbone_config
else:
raise ValueError(
F"""backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.""" )
_UpperCAmelCase = backbone_featmap_shape
_UpperCAmelCase = neck_ignore_stages
if readout_type != "project":
raise ValueError('Readout type must be \'project\' when using `DPT-hybrid` mode.' )
else:
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = []
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('Readout_type must be one of [\'ignore\', \'add\', \'project\']' )
_UpperCAmelCase = readout_type
_UpperCAmelCase = reassemble_factors
_UpperCAmelCase = neck_hidden_sizes
_UpperCAmelCase = fusion_hidden_size
_UpperCAmelCase = head_in_index
_UpperCAmelCase = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = semantic_loss_ignore_index
_UpperCAmelCase = semantic_classifier_dropout
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
_UpperCAmelCase = self.backbone_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Tuple = logging.get_logger(__name__)
snake_case_ : Optional[int] = {
"SCUT-DLVCLab/lilt-roberta-en-base": (
"https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
),
}
class __a ( PretrainedConfig ):
__a : Union[str, Any] = "lilt"
def __init__( self : Any , __magic_name__ : Tuple=3_05_22 , __magic_name__ : str=7_68 , __magic_name__ : Tuple=12 , __magic_name__ : int=12 , __magic_name__ : str=30_72 , __magic_name__ : List[Any]="gelu" , __magic_name__ : Optional[Any]=0.1 , __magic_name__ : int=0.1 , __magic_name__ : Any=5_12 , __magic_name__ : List[Any]=2 , __magic_name__ : Dict=0.0_2 , __magic_name__ : List[Any]=1E-12 , __magic_name__ : List[str]=0 , __magic_name__ : List[str]="absolute" , __magic_name__ : str=None , __magic_name__ : Dict=4 , __magic_name__ : str=10_24 , **__magic_name__ : Optional[Any] , ) -> int:
"""simple docstring"""
super().__init__(pad_token_id=__magic_name__ , **__magic_name__ )
UpperCAmelCase_ : Any = vocab_size
UpperCAmelCase_ : Tuple = hidden_size
UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
UpperCAmelCase_ : Tuple = num_attention_heads
UpperCAmelCase_ : List[str] = hidden_act
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : str = hidden_dropout_prob
UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase_ : List[Any] = max_position_embeddings
UpperCAmelCase_ : str = type_vocab_size
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : Any = layer_norm_eps
UpperCAmelCase_ : int = position_embedding_type
UpperCAmelCase_ : Tuple = classifier_dropout
UpperCAmelCase_ : Dict = channel_shrink_ratio
        UpperCAmelCase_ : int = max_2d_position_embeddings
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]

class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset.
    Multiple copies of the same prompt are sent sequentially. See complete_code for more details.
    """

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generates commented code ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks if all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of the code containing EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple codes for each task in the dataset using multiple GPUs with accelerate."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens_batch in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens_batch)
    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception
    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)
    code_gens = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f'''check({human_eval["test"][task]["entry_point"]})'''
            references.append("\n" + test_func + "\n" + entry_point)
        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers
        )
        print(f'''Results: {pass_at_k}''')
        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)

# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
A : List[Any] = logging.get_logger(__name__)
class CLIPFeatureExtractor ( CLIPImageProcessor ):
'''simple docstring'''
def __init__(self : List[Any] , *_UpperCAmelCase : int , **_UpperCAmelCase : List[str] ) -> None:
"""simple docstring"""
warnings.warn(
"""The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use CLIPImageProcessor instead.""" , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowercase ( ABC ):
@staticmethod
@abstractmethod
def a_ ( _lowerCamelCase : ArgumentParser ):
"""simple docstring"""
raise NotImplementedError()
@abstractmethod
def a_ ( self : str ):
"""simple docstring"""
raise NotImplementedError()
"""simple docstring"""
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class lowercase ( SchedulerMixin , ConfigMixin ):
__lowerCAmelCase : List[Any] = 1
@register_to_config
def __init__( self : Union[str, Any] , _lowerCamelCase : int = 10_00 , _lowerCamelCase : Optional[Union[np.ndarray, List[float]]] = None ):
"""simple docstring"""
self.set_timesteps(_lowerCamelCase )
# standard deviation of the initial noise distribution
A_ : Optional[Any] = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
A_ : List[str] = 4
# running values
A_ : Optional[int] = []
def a_ ( self : str , _lowerCamelCase : int , _lowerCamelCase : Union[str, torch.device] = None ):
"""simple docstring"""
A_ : Tuple = num_inference_steps
A_ : Tuple = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
A_ : Optional[Any] = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
            A_ : str = torch.tensor(self.config.trained_betas , dtype=torch.float32 )
else:
A_ : int = torch.sin(steps * math.pi / 2 ) ** 2
A_ : Dict = (1.0 - self.betas**2) ** 0.5
        A_ : Tuple = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
A_ : int = timesteps.to(_lowerCamelCase )
A_ : int = []
def a_ ( self : int , _lowerCamelCase : torch.FloatTensor , _lowerCamelCase : int , _lowerCamelCase : torch.FloatTensor , _lowerCamelCase : bool = True , ):
"""simple docstring"""
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
A_ : Union[str, Any] = (self.timesteps == timestep).nonzero().item()
A_ : Dict = timestep_index + 1
A_ : Union[str, Any] = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(_lowerCamelCase )
if len(self.ets ) == 1:
A_ : Dict = self.ets[-1]
elif len(self.ets ) == 2:
A_ : List[str] = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
A_ : Optional[Any] = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
A_ : str = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
A_ : Union[str, Any] = self._get_prev_sample(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_lowerCamelCase )
def a_ ( self : Any , _lowerCamelCase : torch.FloatTensor , *_lowerCamelCase : List[str] , **_lowerCamelCase : List[Any] ):
"""simple docstring"""
return sample
def a_ ( self : int , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
A_ : Optional[Any] = self.alphas[timestep_index]
A_ : Union[str, Any] = self.betas[timestep_index]
A_ : int = self.alphas[prev_timestep_index]
A_ : Tuple = self.betas[prev_timestep_index]
A_ : str = (sample - sigma * ets) / max(_lowerCamelCase , 1E-8 )
A_ : List[str] = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self : List[str] ):
"""simple docstring"""
return self.config.num_train_timesteps
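# Hedged usage sketch: the class above mirrors diffusers' IPNDMScheduler, so a
# denoising loop would look roughly like this (`model` and `sample` are assumed
# to exist with compatible shapes):
# scheduler.set_timesteps(50)
# for t in scheduler.timesteps:
#     model_output = model(sample, t)
#     sample = scheduler.step(model_output, t, sample).prev_sample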
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
snake_case_ : int = datasets.logging.get_logger(__name__)
_CITATION = '\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = "{COMET}: A Neural Framework for {MT} Evaluation",\n author = "Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon",\n booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",\n month = nov,\n year = "2020",\n address = "Online",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",\n pages = "2685--2702",\n}\n'
_DESCRIPTION = '\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year\'s competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n'
_KWARGS_DESCRIPTION = '\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric(\'comet\')\n >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use\n >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]\n >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]\n >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results["scores"]])\n [0.19, 0.92]\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__ ( datasets.Metric ):
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='https://unbabel.github.io/COMET/html/index.html' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'sources': datasets.Value('string' ,id='sequence' ),
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/Unbabel/COMET'] ,reference_urls=[
'https://github.com/Unbabel/COMET',
'https://www.aclweb.org/anthology/2020.emnlp-main.213/',
'http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6',
] ,)
def UpperCamelCase_ ( self : Tuple ,lowerCamelCase__ : str ):
'''simple docstring'''
if self.config_name == "default":
_UpperCamelCase : Optional[Any] = comet.load_from_checkpoint(comet.download_model('wmt20-comet-da' ) )
else:
_UpperCamelCase : Dict = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def UpperCamelCase_ ( self : Optional[Any] ,lowerCamelCase__ : Any ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Dict=None ,lowerCamelCase__ : int=False ):
'''simple docstring'''
if gpus is None:
_UpperCamelCase : int = 1 if torch.cuda.is_available() else 0
_UpperCamelCase : int = {'src': sources, 'mt': predictions, 'ref': references}
_UpperCamelCase : List[str] = [dict(zip(lowerCamelCase__ ,lowerCamelCase__ ) ) for t in zip(*data.values() )]
_UpperCamelCase : int = self.scorer.predict(lowerCamelCase__ ,gpus=lowerCamelCase__ ,progress_bar=lowerCamelCase__ )
return {"mean_score": mean_score, "scores": scores}
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
_DESCRIPTION = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
_KWARGS_DESCRIPTION = '\nCalculates how good predictions are given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidate should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the candidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
_WARNING = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
_LICENSE = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__ ( datasets.Metric ):
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Value('string' ),
} ) ,homepage='https://github.com/openai/human-eval' ,codebase_urls=['https://github.com/openai/human-eval'] ,reference_urls=['https://github.com/openai/human-eval'] ,license=_LICENSE ,)
def UpperCamelCase_ ( self : str ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Dict ,lowerCamelCase__ : List[Any]=[1, 10, 100] ,lowerCamelCase__ : int=4 ,lowerCamelCase__ : List[Any]=3.0 ):
'''simple docstring'''
if os.getenv('HF_ALLOW_CODE_EVAL' ,0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError('This metric is currently not supported on Windows.' )
with ThreadPoolExecutor(max_workers=lowerCamelCase__ ) as executor:
_UpperCamelCase : Optional[int] = []
_UpperCamelCase : Optional[int] = Counter()
_UpperCamelCase : int = 0
_UpperCamelCase : Dict = defaultdict(lowerCamelCase__ )
for task_id, (candidates, test_case) in enumerate(zip(lowerCamelCase__ ,lowerCamelCase__ ) ):
for candidate in candidates:
_UpperCamelCase : int = candidate + '\n' + test_case
_UpperCamelCase : Optional[Any] = (test_program, timeout, task_id, completion_id[task_id])
_UpperCamelCase : Dict = executor.submit(lowerCamelCase__ ,*lowerCamelCase__ )
futures.append(lowerCamelCase__ )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(lowerCamelCase__ ):
_UpperCamelCase : Dict = future.result()
results[result["task_id"]].append((result['completion_id'], result) )
_UpperCamelCase , _UpperCamelCase : List[str] = [], []
for result in results.values():
result.sort()
_UpperCamelCase : Optional[Any] = [r[1]['passed'] for r in result]
total.append(len(lowerCamelCase__ ) )
correct.append(sum(lowerCamelCase__ ) )
_UpperCamelCase : List[str] = np.array(lowerCamelCase__ )
_UpperCamelCase : List[Any] = np.array(lowerCamelCase__ )
_UpperCamelCase : Tuple = k
_UpperCamelCase : Tuple = {F'pass@{k}': estimate_pass_at_k(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
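# pass@k is estimated as 1 - C(n - c, k) / C(n, k) for n samples with c correct,
# computed stably as 1 - prod_{i = n-c+1 .. n} (1 - k / i) (Chen et al., 2021).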
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/rembert''': 256,
}
SPIECE_UNDERLINE = '''▁'''
class __A ( PreTrainedTokenizerFast ):
"""simple docstring"""
UpperCamelCase__ : str =VOCAB_FILES_NAMES
UpperCamelCase__ : Union[str, Any] =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : str =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : List[str] =RemBertTokenizer
def __init__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__="[CLS]" , lowerCamelCase__="[SEP]" , lowerCamelCase__="<unk>" , lowerCamelCase__="[SEP]" , lowerCamelCase__="<pad>" , lowerCamelCase__="[CLS]" , lowerCamelCase__="[MASK]" , **lowerCamelCase__ , ):
"""simple docstring"""
__UpperCamelCase : List[str] =AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
super().__init__(
lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , do_lower_case=lowerCamelCase__ , remove_space=lowerCamelCase__ , keep_accents=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , **lowerCamelCase__ , )
__UpperCamelCase : str =do_lower_case
__UpperCamelCase : List[str] =remove_space
__UpperCamelCase : Dict =keep_accents
__UpperCamelCase : Tuple =vocab_file
__UpperCamelCase : Optional[int] =False if not self.vocab_file else True
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
"""simple docstring"""
__UpperCamelCase : List[Any] =[self.sep_token_id]
__UpperCamelCase : Optional[int] =[self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ )) + [1]
return [1] + ([0] * len(lowerCamelCase__ )) + [1]
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
"""simple docstring"""
__UpperCamelCase : List[Any] =[self.sep_token_id]
__UpperCamelCase : Optional[Any] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
"""simple docstring"""
if not os.path.isdir(lowerCamelCase__ ):
logger.error('Vocabulary path ({}) should be a directory'.format(lowerCamelCase__ ) )
return
__UpperCamelCase : List[str] =os.path.join(
lowerCamelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ):
copyfile(self.vocab_file , lowerCamelCase__ )
return (out_vocab_file,)
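# Hedged usage sketch (assuming the class above is RemBertTokenizerFast and the
# "google/rembert" checkpoint from the pretrained map above is available):
# tokenizer = RemBertTokenizerFast.from_pretrained("google/rembert")
# input_ids = tokenizer("hello world")["input_ids"]  # [CLS] ... [SEP] ids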
"""simple docstring"""
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """
    Find the maximum sum of non-adjacent elements of `nums`.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
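# The scan keeps two running values, the best sum including the current element
# and the best sum excluding it, giving O(n) time and O(1) extra space.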
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]

def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)
    # use a heap-based merge to merge stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection

if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
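# Patience sort mirrors the card game: each element is placed on the leftmost
# stack whose top is >= the element (found via bisect_left over the ordered
# stacks), and the sorted result is recovered with a k-way heap merge of the
# reversed (ascending) stacks.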
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=3 , _UpperCAmelCase=224 , _UpperCAmelCase=30 , _UpperCAmelCase=400 , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=[0.5, 0.5, 0.5] , ):
__a : int = size if size is not None else {'''height''': 18, '''width''': 18}
__a : List[Any] = parent
__a : Dict = batch_size
__a : Dict = num_channels
__a : int = image_size
__a : Optional[Any] = min_resolution
__a : Optional[int] = max_resolution
__a : Dict = do_resize
__a : List[Any] = size
__a : int = do_normalize
__a : Optional[Any] = image_mean
__a : int = image_std
def _lowerCamelCase ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class EfficientFormerImageProcessorTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = ViTImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self ):
__a : int = EfficientFormerImageProcessorTester(self )
@property
def _lowerCamelCase ( self ):
return self.image_proc_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self ):
__a : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''image_std''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''size''' ) )
def _lowerCamelCase ( self ):
pass
def _lowerCamelCase ( self ):
# Initialize image_processor
__a : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : int = prepare_image_inputs(self.image_proc_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
__a : Dict = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
__a : Any = image_processor(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
def _lowerCamelCase ( self ):
# Initialize image_processor
__a : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : int = prepare_image_inputs(self.image_proc_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
__a : Optional[int] = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
__a : str = image_processor(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
def _lowerCamelCase ( self ):
# Initialize image_processor
__a : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
__a : str = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
__a : Tuple = image_processor(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : int = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class _UpperCAmelCase ( PretrainedConfig ):
'''simple docstring'''
lowerCamelCase__ ='layoutlmv3'
def __init__(self , a_=5_02_65 , a_=7_68 , a_=12 , a_=12 , a_=30_72 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=2 , a_=0.02 , a_=1E-5 , a_=1 , a_=0 , a_=2 , a_=10_24 , a_=1_28 , a_=1_28 , a_=True , a_=32 , a_=1_28 , a_=64 , a_=2_56 , a_=True , a_=True , a_=True , a_=2_24 , a_=3 , a_=16 , a_=None , **a_ , ):
'''simple docstring'''
super().__init__(
vocab_size=a_ , hidden_size=a_ , num_hidden_layers=a_ , num_attention_heads=a_ , intermediate_size=a_ , hidden_act=a_ , hidden_dropout_prob=a_ , attention_probs_dropout_prob=a_ , max_position_embeddings=a_ , type_vocab_size=a_ , initializer_range=a_ , layer_norm_eps=a_ , pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ , )
        __snake_case : str = max_2d_position_embeddings
__snake_case : Optional[Any] = coordinate_size
__snake_case : str = shape_size
__snake_case : Union[str, Any] = has_relative_attention_bias
__snake_case : Dict = rel_pos_bins
__snake_case : Union[str, Any] = max_rel_pos
__snake_case : Union[str, Any] = has_spatial_attention_bias
        __snake_case : List[Any] = rel_2d_pos_bins
        __snake_case : List[Any] = max_rel_2d_pos
__snake_case : Dict = text_embed
__snake_case : Optional[Any] = visual_embed
__snake_case : Dict = input_size
__snake_case : Union[str, Any] = num_channels
__snake_case : Any = patch_size
__snake_case : Union[str, Any] = classifier_dropout
class _UpperCAmelCase ( OnnxConfig ):
'''simple docstring'''
lowerCamelCase__ =version.parse('1.12' )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return 1E-5
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return 12
def SCREAMING_SNAKE_CASE (self , a_ , a_ = -1 , a_ = -1 , a_ = False , a_ = None , a_ = 3 , a_ = 40 , a_ = 40 , ):
'''simple docstring'''
setattr(processor.image_processor , '''apply_ocr''' , a_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__snake_case : List[str] = compute_effective_axis_dimension(
a_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__snake_case : Any = processor.tokenizer.num_special_tokens_to_add(a_ )
__snake_case : Tuple = compute_effective_axis_dimension(
a_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=a_ )
# Generate dummy inputs according to compute batch and sequence
__snake_case : Optional[int] = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
__snake_case : Any = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__snake_case : int = self._generate_dummy_images(a_ , a_ , a_ , a_ )
__snake_case : Tuple = dict(
processor(
a_ , text=a_ , boxes=a_ , return_tensors=a_ , ) )
return inputs
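    # The dummy inputs above bundle input_ids, attention_mask, bbox and
    # pixel_values, mirroring the ONNX input axes declared in the `inputs`
    # property of this config.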
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Optional[int] = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _UpperCAmelCase ( PretrainedConfig ):
'''simple docstring'''
lowerCamelCase__ ='yolos'
def __init__(self , a_=7_68 , a_=12 , a_=12 , a_=30_72 , a_="gelu" , a_=0.0 , a_=0.0 , a_=0.02 , a_=1E-12 , a_=[5_12, 8_64] , a_=16 , a_=3 , a_=True , a_=1_00 , a_=True , a_=False , a_=1 , a_=5 , a_=2 , a_=5 , a_=2 , a_=0.1 , **a_ , ):
'''simple docstring'''
super().__init__(**a_ )
__snake_case : Union[str, Any] = hidden_size
__snake_case : Union[str, Any] = num_hidden_layers
__snake_case : Dict = num_attention_heads
__snake_case : str = intermediate_size
__snake_case : List[str] = hidden_act
__snake_case : Tuple = hidden_dropout_prob
__snake_case : Optional[int] = attention_probs_dropout_prob
__snake_case : Union[str, Any] = initializer_range
__snake_case : Tuple = layer_norm_eps
__snake_case : List[Any] = image_size
__snake_case : Tuple = patch_size
__snake_case : str = num_channels
__snake_case : Tuple = qkv_bias
__snake_case : Union[str, Any] = num_detection_tokens
__snake_case : List[str] = use_mid_position_embeddings
__snake_case : Tuple = auxiliary_loss
# Hungarian matcher
__snake_case : List[str] = class_cost
__snake_case : int = bbox_cost
__snake_case : int = giou_cost
# Loss coefficients
__snake_case : Optional[int] = bbox_loss_coefficient
__snake_case : List[str] = giou_loss_coefficient
__snake_case : List[Any] = eos_coefficient
class _UpperCAmelCase ( OnnxConfig ):
'''simple docstring'''
lowerCamelCase__ =version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return 1E-4
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return 12
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""facebook/blenderbot-3B""": 128}
class a__ ( PreTrainedTokenizerFast ):
A__ : int = VOCAB_FILES_NAMES
A__ : Any = PRETRAINED_VOCAB_FILES_MAP
A__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : str = ['input_ids', 'attention_mask']
A__ : List[str] = BlenderbotTokenizer
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="replace" , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase=False , UpperCAmelCase=True , **UpperCAmelCase , ) -> str:
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , errors=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase , **UpperCAmelCase , )
__a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , UpperCAmelCase ) != add_prefix_space:
__a = getattr(UpperCAmelCase , pre_tok_state.pop('type' ) )
__a = add_prefix_space
__a = pre_tok_class(**UpperCAmelCase )
__a = add_prefix_space
__a = 'post_processor'
__a = getattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase )
if tokenizer_component_instance:
__a = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
__a = tuple(state['sep'] )
if "cls" in state:
__a = tuple(state['cls'] )
__a = False
if state.get('add_prefix_space' , UpperCAmelCase ) != add_prefix_space:
__a = add_prefix_space
__a = True
if state.get('trim_offsets' , UpperCAmelCase ) != trim_offsets:
__a = trim_offsets
__a = True
if changes_to_apply:
__a = getattr(UpperCAmelCase , state.pop('type' ) )
__a = component_class(**UpperCAmelCase )
setattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __SCREAMING_SNAKE_CASE ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> List[str]:
__a = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else value
__a = value
def __SCREAMING_SNAKE_CASE ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
__a = kwargs.get('is_split_into_words' , UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
__a = kwargs.get('is_split_into_words' , UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]:
__a = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Union[str, Any]:
return token_ids_a + [self.eos_token_id]
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> List[int]:
__a = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(UpperCAmelCase )
__a = ' '.join(UpperCAmelCase )
__a = self.encode(UpperCAmelCase )
if len(UpperCAmelCase ) > self.model_max_length:
__a = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
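# Usage sketch: round-trip a sentence through the fast tokenizer defined above,
# assuming the public "facebook/blenderbot-3B" checkpoint (downloaded on first use).
tok = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
enc = tok("Hello, how are you?")
print(enc.input_ids)              # ends with the </s> (eos) token id
print(tok.decode(enc.input_ids))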
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split the dataset bunch into features and regression target.
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # California house price dataset is used to demonstrate the model fit.
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
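# Usage sketch: exercise the `xgboost` helper on a tiny synthetic split (no
# dataset download) to show predictions come back with shape (n_test, 1).
_x = np.arange(40, dtype=float).reshape(20, 2)
_y = _x.sum(axis=1)
_preds = xgboost(_x[:16], _y[:16], _x[16:])
print(_preds.shape)  # (4, 1)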
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int) -> None:
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)
if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
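# Non-interactive sanity check: the recursion sorts the list in place.
_data = [5, 3, 1, 4, 2]
rec_insertion_sort(_data, len(_data))
assert _data == [1, 2, 3, 4, 5]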
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])

        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        images = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version for the given pattern inside `fname`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace links from the main doc to the stable doc in the README's model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version.
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
def solution(limit: int = 1000000) -> int:
    # Sieve the primes below `limit`, then apply Euler's product formula
    # phi(n) = n * prod(1 - 1/p) over the distinct prime factors p of n,
    # and return the sum of phi(n) for n = 2..limit.
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
if __name__ == "__main__":
print(f"""{solution() = }""")
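# Hand-checkable case: phi(2..10) = 1, 2, 2, 4, 2, 6, 4, 6, 4, which sums to 31.
assert solution(10) == 31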
def heaps(arr: list) -> list:
    """Generate all permutations of `arr` with Heap's algorithm
    (one swap between consecutive permutations)."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
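# Heap's algorithm emits each permutation exactly once: n! results for n items.
assert len(heaps([1, 2, 3])) == 6
assert len(set(heaps([1, 2, 3]))) == 6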
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )


class WhisperOnnxConfig(OnnxSeqaSeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
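# Usage sketch: a small config plus its ONNX export description, assuming the
# classes above mirror `WhisperConfig` / `WhisperOnnxConfig` in `transformers`.
config = WhisperConfig(encoder_layers=2, decoder_layers=2, d_model=64)
onnx_config = WhisperOnnxConfig(config)
print(onnx_config.inputs)               # axes for "input_features" and "decoder_input_ids"
print(onnx_config.atol_for_validation)  # 1e-3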
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type: str,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
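# Usage sketch: the same consolidation invoked directly from Python instead of
# the CLI. Checkpoint names are illustrative; running this downloads several GB,
# so the call is gated off by default.
if False:
    consolidate(
        "rag_sequence",
        "facebook/bart-large-cnn",
        "facebook/dpr-question_encoder-single-nq-base",
        Path("./rag-checkpoint"),
    )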
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from ..utils import DummyObject, requires_backends
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
def UpperCAmelCase__ ( *_A : Optional[Any] , **_A : Optional[Any] ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : Union[str, Any] , **_A : List[Any] ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : Union[str, Any] , **_A : Tuple ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : List[str] , **_A : List[str] ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : Dict , **_A : Dict ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : List[str] , **_A : str ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : Optional[int] , **_A : Dict ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : str = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Any = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : str = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : str = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : str = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : str = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Any = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
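# Usage sketch: with the `_LazyModule` installed in `sys.modules`, importing the
# package stays cheap and heavy submodules only load on first attribute access.
# Kept as a comment here to avoid a circular import inside the module itself:
#
#     from transformers import XLNetConfig  # resolved lazily via _LazyModule
#     config = XLNetConfig()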
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
__snake_case = 256047
__snake_case = 256145
@require_sentencepiece
@require_tokenizers
class lowercase ( A__ , unittest.TestCase ):
"""simple docstring"""
_a = NllbTokenizer
_a = NllbTokenizerFast
_a = True
_a = True
_a = {}
def lowerCAmelCase__ ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
UpperCamelCase__ :str = NllbTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = NllbTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ )
UpperCamelCase__ :int = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCamelCase__ :List[str] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCamelCase__ :Tuple = tokenizer.convert_tokens_to_ids(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCamelCase__ :Dict = tokenizer.convert_ids_to_tokens(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-nllb''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCamelCase__ :List[str] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase__ :List[str] = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase__ :Dict = tempfile.mkdtemp()
UpperCamelCase__ :str = tokenizer_r.save_pretrained(UpperCamelCase_ )
UpperCamelCase__ :int = tokenizer_p.save_pretrained(UpperCamelCase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
UpperCamelCase__ :Optional[Any] = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_ )
# Checks everything loads correctly in the same way
UpperCamelCase__ :List[Any] = tokenizer_r.from_pretrained(UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = tokenizer_p.from_pretrained(UpperCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_ ) )
shutil.rmtree(UpperCamelCase_ )
# Save tokenizer rust, legacy_format=True
UpperCamelCase__ :Dict = tempfile.mkdtemp()
UpperCamelCase__ :Union[str, Any] = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_ )
UpperCamelCase__ :Any = tokenizer_p.save_pretrained(UpperCamelCase_ )
# Checks it save with the same files
self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_ )
# Checks everything loads correctly in the same way
UpperCamelCase__ :Tuple = tokenizer_r.from_pretrained(UpperCamelCase_ )
UpperCamelCase__ :str = tokenizer_p.from_pretrained(UpperCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_ ) )
shutil.rmtree(UpperCamelCase_ )
# Save tokenizer rust, legacy_format=False
UpperCamelCase__ :List[Any] = tempfile.mkdtemp()
UpperCamelCase__ :Dict = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_ )
UpperCamelCase__ :List[str] = tokenizer_p.save_pretrained(UpperCamelCase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCamelCase__ :int = tokenizer_r.from_pretrained(UpperCamelCase_ )
UpperCamelCase__ :Dict = tokenizer_p.from_pretrained(UpperCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_ ) )
shutil.rmtree(UpperCamelCase_ )
@require_torch
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if not self.test_seqaseq:
return
UpperCamelCase__ :int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Longer text that will definitely require truncation.
UpperCamelCase__ :List[Any] = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'''
''' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'''
''' will only worsen the violence and misery for millions of people.''',
]
UpperCamelCase__ :str = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'''
''' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'''
''' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
try:
UpperCamelCase__ :Dict = tokenizer.prepare_seqaseq_batch(
src_texts=UpperCamelCase_ , tgt_texts=UpperCamelCase_ , max_length=3 , max_target_length=10 , return_tensors='''pt''' , src_lang='''eng_Latn''' , tgt_lang='''ron_Latn''' , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
UpperCamelCase__ :List[str] = tokenizer.prepare_seqaseq_batch(
UpperCamelCase_ , tgt_texts=UpperCamelCase_ , max_length=3 , return_tensors='''pt''' )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
UpperCamelCase__ :List[str] = tokenizer.prepare_seqaseq_batch(
src_texts=UpperCamelCase_ , max_length=3 , max_target_length=10 , return_tensors='''pt''' )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn('''decoder_input_ids''' , UpperCamelCase_ )
@unittest.skip('''Unfortunately way too slow to build a BPE with SentencePiece.''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCamelCase__ :Optional[Any] = [AddedToken('''<special>''' , lstrip=UpperCamelCase_ )]
UpperCamelCase__ :Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase__ :str = tokenizer_r.encode('''Hey this is a <special> token''' )
UpperCamelCase__ :int = tokenizer_r.encode('''<special>''' , add_special_tokens=UpperCamelCase_ )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
UpperCamelCase__ :int = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCamelCase__ :Tuple = self.tokenizer_class.from_pretrained(
UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = tokenizer_p.encode('''Hey this is a <special> token''' )
UpperCamelCase__ :Any = tokenizer_cr.encode('''Hey this is a <special> token''' )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
"""simple docstring"""
_a = 'facebook/nllb-200-distilled-600M'
_a = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
_a = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
_a = [
25_60_47,
1_62_97,
13_44_08,
81_65,
24_80_66,
1_47_34,
9_50,
11_35,
10_57_21,
35_73,
83,
2_73_52,
1_08,
4_94_86,
2,
]
@classmethod
def lowerCAmelCase__ ( cls ):
'''simple docstring'''
UpperCamelCase__ :NllbTokenizer = NllbTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''eng_Latn''' , tgt_lang='''ron_Latn''' )
UpperCamelCase__ :str = 1
return cls
def lowerCAmelCase__ ( self ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Arab'''] , 256001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Latn'''] , 256002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''fra_Latn'''] , 256057 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
self.assertIn(UpperCamelCase_ , self.tokenizer.all_special_ids )
# fmt: off
UpperCamelCase__ :Optional[Any] = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
# fmt: on
UpperCamelCase__ :Dict = self.tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Any = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , UpperCamelCase_ )
UpperCamelCase__ :Tuple = 10
UpperCamelCase__ :List[Any] = self.tokenizer(UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , UpperCamelCase_ )
self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [256203, 3] )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = tempfile.mkdtemp()
UpperCamelCase__ :Optional[int] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = NllbTokenizer.from_pretrained(UpperCamelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCamelCase_ )
@require_torch
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
UpperCamelCase__ :List[str] = shift_tokens_right(
batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['''ron_Latn'''] )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
UpperCamelCase__ :Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Any = self.tokenizer(self.src_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=3 , return_tensors='''pt''' )
UpperCamelCase__ :Dict = self.tokenizer(
text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=10 , return_tensors='''pt''' )
UpperCamelCase__ :List[str] = targets['''input_ids''']
UpperCamelCase__ :str = shift_tokens_right(
UpperCamelCase_ , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' )
self.assertEqual(
nested_simplify(UpperCamelCase_ ) , {
# A, test, EOS, en_XX
'''input_ids''': [[256047, 70, 7356, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 256057,
} , )
@require_torch
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = True
UpperCamelCase__ :str = self.tokenizer(
'''UN Chief says there is no military solution in Syria''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' )
self.assertEqual(
inputs.input_ids , [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047] )
UpperCamelCase__ :Union[str, Any] = False
UpperCamelCase__ :Union[str, Any] = self.tokenizer(
'''UN Chief says there is no military solution in Syria''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' )
self.assertEqual(
inputs.input_ids , [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2] )
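For reference, a hedged usage sketch of the source/target language handling these tests exercise, using the same checkpoint and language pair as above (exact ids depend on the released tokenizer):

from transformers import NllbTokenizer

tok = NllbTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="ron_Latn"
)
batch = tok(
    "UN Chief says there is no military solution in Syria",
    text_target="Şeful ONU declară că nu există o soluţie militară în Siria",
    return_tensors="pt",
)
# With legacy_behaviour left False (as the final test above checks), the source
# language code is prepended: [src_lang_code, ..., eos]. With True it becomes
# a suffix: [..., eos, src_lang_code].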
| 219
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    """Print every subsequence of `sequence` via backtracking."""
    create_state_space_tree(sequence, [], 0)
def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """Branch on excluding, then including, sequence[index]."""
    if index == len(sequence):
        print(current_subsequence)
        return
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
__snake_case = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['''A''', '''B''', '''C'''])
generate_all_subsequences(seq)
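An equivalent formulation via the standard library, for comparison with the backtracking version above (a sketch; it yields the same 2**n subsequences, in a different order):

from itertools import combinations

def all_subsequences(sequence):
    # combinations preserves element order, so this enumerates every subsequence.
    for r in range(len(sequence) + 1):
        for combo in combinations(sequence, r):
            yield list(combo)

# list(all_subsequences([3, 1, 2, 4])) -> 16 subsequences, from [] to [3, 1, 2, 4]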
| 219
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
a__ : List[Any] = {
"""configuration_convnext""": ["""CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvNextConfig""", """ConvNextOnnxConfig"""]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : str = ["""ConvNextFeatureExtractor"""]
a__ : Any = ["""ConvNextImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Tuple = [
"""CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvNextForImageClassification""",
"""ConvNextModel""",
"""ConvNextPreTrainedModel""",
"""ConvNextBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : int = [
"""TFConvNextForImageClassification""",
"""TFConvNextModel""",
"""TFConvNextPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
a__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 80
|
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : List[Any] =logging.get_logger(__name__)
__lowerCAmelCase : Union[str, Any] ={
"""google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""",
}
class _A ( lowerCAmelCase ):
snake_case__ : Optional[int] = 'efficientnet'
def __init__( self , __lowerCAmelCase = 3 , __lowerCAmelCase = 600 , __lowerCAmelCase = 2.0 , __lowerCAmelCase = 3.1 , __lowerCAmelCase = 8 , __lowerCAmelCase = [3, 3, 5, 3, 5, 5, 3] , __lowerCAmelCase = [32, 16, 24, 40, 80, 112, 192] , __lowerCAmelCase = [16, 24, 40, 80, 112, 192, 320] , __lowerCAmelCase = [] , __lowerCAmelCase = [1, 2, 2, 2, 1, 2, 1] , __lowerCAmelCase = [1, 2, 2, 3, 3, 4, 1] , __lowerCAmelCase = [1, 6, 6, 6, 6, 6, 6] , __lowerCAmelCase = 0.2_5 , __lowerCAmelCase = "swish" , __lowerCAmelCase = 2560 , __lowerCAmelCase = "mean" , __lowerCAmelCase = 0.0_2 , __lowerCAmelCase = 0.0_0_1 , __lowerCAmelCase = 0.9_9 , __lowerCAmelCase = 0.5 , __lowerCAmelCase = 0.2 , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase )
lowercase = num_channels
lowercase = image_size
lowercase = width_coefficient
lowercase = depth_coefficient
lowercase = depth_divisor
lowercase = kernel_sizes
lowercase = in_channels
lowercase = out_channels
lowercase = depthwise_padding
lowercase = strides
lowercase = num_block_repeats
lowercase = expand_ratios
lowercase = squeeze_expansion_ratio
lowercase = hidden_act
lowercase = hidden_dim
lowercase = pooling_type
lowercase = initializer_range
lowercase = batch_norm_eps
lowercase = batch_norm_momentum
lowercase = dropout_rate
lowercase = drop_connect_rate
lowercase = sum(__lowerCAmelCase ) * 4
class _A ( lowerCAmelCase ):
snake_case__ : Dict = version.parse('1.11' )
@property
def A__ ( self ):
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def A__ ( self ):
"""simple docstring"""
return 1E-5
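A short usage sketch of the configuration defined above. The class names are obfuscated in this dump; `EfficientNetConfig` is the conventional transformers name and is assumed here, as is the derived field `num_hidden_layers` (the `sum(...) * 4` assignment above):

from transformers import EfficientNetConfig  # assumed name for the config class above

config = EfficientNetConfig()      # b7-flavoured defaults shown above
print(config.image_size)           # 600
print(config.num_hidden_layers)    # 4 * sum(num_block_repeats) = 4 * 16 = 64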
| 197
| 0
|
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    """Return True if `n` reads the same forwards and backwards."""
    n = str(n)
    return n == n[::-1]
def solution(limit: int = 1000000) -> int:
    """Sum all numbers below `limit` that are palindromic in base 10 and base 2."""
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
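A quick sanity check of the helper on a known double-base palindrome (585 is 1001001001 in binary), using the same bin()-splitting convention as above:

assert is_palindrome(585)
assert is_palindrome(bin(585).split("b")[1])  # '1001001001'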
| 367
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCamelCase : str = {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class __snake_case (_a ):
lowerCAmelCase__ = "mvp"
lowerCAmelCase__ = ["past_key_values"]
lowerCAmelCase__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Tuple , _UpperCAmelCase : Optional[Any]=5_0267 , _UpperCAmelCase : List[Any]=1024 , _UpperCAmelCase : List[Any]=12 , _UpperCAmelCase : Optional[int]=4096 , _UpperCAmelCase : Tuple=16 , _UpperCAmelCase : List[Any]=12 , _UpperCAmelCase : Tuple=4096 , _UpperCAmelCase : Tuple=16 , _UpperCAmelCase : str=0.0 , _UpperCAmelCase : Optional[int]=0.0 , _UpperCAmelCase : Dict="gelu" , _UpperCAmelCase : Dict=1024 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Optional[int]=0.0 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Tuple=False , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : List[Any]=1 , _UpperCAmelCase : int=0 , _UpperCAmelCase : Optional[Any]=2 , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : int=2 , _UpperCAmelCase : Any=2 , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : Any=100 , _UpperCAmelCase : int=800 , **_UpperCAmelCase : Optional[Any] , ) -> List[str]:
'''simple docstring'''
_lowerCAmelCase : Any = vocab_size
_lowerCAmelCase : Tuple = max_position_embeddings
_lowerCAmelCase : int = d_model
_lowerCAmelCase : Optional[int] = encoder_ffn_dim
_lowerCAmelCase : Dict = encoder_layers
_lowerCAmelCase : int = encoder_attention_heads
_lowerCAmelCase : str = decoder_ffn_dim
_lowerCAmelCase : Tuple = decoder_layers
_lowerCAmelCase : str = decoder_attention_heads
_lowerCAmelCase : Optional[int] = dropout
_lowerCAmelCase : int = attention_dropout
_lowerCAmelCase : List[str] = activation_dropout
_lowerCAmelCase : Tuple = activation_function
_lowerCAmelCase : Dict = init_std
_lowerCAmelCase : Tuple = encoder_layerdrop
_lowerCAmelCase : Any = decoder_layerdrop
_lowerCAmelCase : Union[str, Any] = classifier_dropout
_lowerCAmelCase : str = use_cache
_lowerCAmelCase : Union[str, Any] = encoder_layers
_lowerCAmelCase : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
_lowerCAmelCase : Any = use_prompt
_lowerCAmelCase : Optional[int] = prompt_length
_lowerCAmelCase : Optional[int] = prompt_mid_dim
super().__init__(
pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , is_encoder_decoder=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , forced_eos_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , _UpperCAmelCase ):
_lowerCAmelCase : Optional[Any] = self.bos_token_id
warnings.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
"""The config can simply be saved and uploaded again to be fixed.""" )
| 159
| 0
|
"""simple docstring"""
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    n = 2 ** power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
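The same result via string conversion, for comparison (a sketch):

def digit_sum(power: int = 1000) -> int:
    # Sum of the decimal digits of 2**power, e.g. digit_sum(15) == 26 (32768 -> 3+2+7+6+8).
    return sum(int(d) for d in str(2 ** power))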
| 54
|
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
_snake_case : List[str] = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}
_snake_case : List[str] = "zero2"
_snake_case : Any = "zero3"
_snake_case : Dict = [ZEROa, ZEROa]
def lowerCAmelCase_ ( func , param_num , param ):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args ) )
    return F'{func.__name__}_{param_based_name}'
# Cartesian-product of zero stages with models to test
_snake_case : Union[str, Any] = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class a (_lowerCAmelCase ):
"""simple docstring"""
@parameterized.expand(lowerCamelCase , name_func=lowerCamelCase )
def __snake_case ( self : Any , lowerCamelCase : List[Any] , lowerCamelCase : Dict ) -> Union[str, Any]:
self.run_and_check(
stage=lowerCamelCase , model=lowerCamelCase , distributed=lowerCamelCase , fpaa=lowerCamelCase , )
@require_torch_multi_gpu
@parameterized.expand(lowerCamelCase , name_func=lowerCamelCase )
def __snake_case ( self : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : List[Any] ) -> int:
self.run_and_check(
stage=lowerCamelCase , model=lowerCamelCase , distributed=lowerCamelCase , fpaa=lowerCamelCase , )
@parameterized.expand(lowerCamelCase , name_func=lowerCamelCase )
def __snake_case ( self : List[Any] , lowerCamelCase : Any , lowerCamelCase : int ) -> Dict:
self.run_and_check(
stage=lowerCamelCase , model=lowerCamelCase , distributed=lowerCamelCase , fpaa=lowerCamelCase , )
@require_torch_multi_gpu
@parameterized.expand(lowerCamelCase , name_func=lowerCamelCase )
def __snake_case ( self : str , lowerCamelCase : str , lowerCamelCase : Any ) -> str:
self.run_and_check(
stage=lowerCamelCase , model=lowerCamelCase , distributed=lowerCamelCase , fpaa=lowerCamelCase , )
def __snake_case ( self : str , lowerCamelCase : List[Any] ) -> Union[str, Any]:
# XXX: run_asr is premature and doesn't save any results
# so all we check for now is that the process didn't fail
pass
def __snake_case ( self : List[Any] , lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : int = 10 , lowerCamelCase : bool = True , lowerCamelCase : bool = True , lowerCamelCase : bool = True , ) -> Tuple:
__snake_case : Any = models[model]
__snake_case : Tuple = self.run_trainer(
stage=lowerCamelCase , model_name=lowerCamelCase , eval_steps=lowerCamelCase , num_train_epochs=1 , distributed=lowerCamelCase , fpaa=lowerCamelCase , )
self.do_checks(lowerCamelCase )
return output_dir
def __snake_case ( self : Optional[Any] , lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : int = 10 , lowerCamelCase : int = 1 , lowerCamelCase : bool = True , lowerCamelCase : bool = True , ) -> Tuple:
__snake_case : Optional[int] = self.get_auto_remove_tmp_dir("./xxx" , after=lowerCamelCase )
__snake_case : Optional[int] = F'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(lowerCamelCase )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split()
if fpaa:
args.extend(["--fp16"] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
__snake_case : Optional[int] = F'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
__snake_case : Dict = [F'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
__snake_case : Any = self.get_launcher(lowerCamelCase )
__snake_case : Optional[Any] = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowerCamelCase , env=self.get_env() )
return output_dir
def __snake_case ( self : str , lowerCamelCase : str=False ) -> Any:
# 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
# results with mode gpus because we use very little data)
__snake_case : Dict = min(2 , get_gpu_count() ) if distributed else 1
return F'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
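To make the flow above concrete, a minimal sketch of how the launcher, script, and trainer args are composed before being handed to execute_subprocess_async (the helper name here is illustrative, not part of the test class):

def build_deepspeed_cmd(num_gpus: int, script: str, extra_args: list) -> list:
    # Mirrors run_trainer above: launcher + script + args + deepspeed config flag.
    launcher = f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
    return launcher + [script] + extra_args

cmd = build_deepspeed_cmd(2, "run_asr.py", ["--fp16", "--deepspeed", "ds_config_wav2vec2_zero2.json"])
# -> ['deepspeed', '--num_nodes', '1', '--num_gpus', '2', 'run_asr.py', '--fp16', ...]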
| 123
| 0
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
"""ut/deta""": """https://huggingface.co/ut/deta/resolve/main/config.json""",
}
class lowerCAmelCase_ ( a__ ):
'''simple docstring'''
__snake_case = "deta"
__snake_case = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=9_00 , _UpperCAmelCase=20_48 , _UpperCAmelCase=6 , _UpperCAmelCase=20_48 , _UpperCAmelCase=8 , _UpperCAmelCase=6 , _UpperCAmelCase=10_24 , _UpperCAmelCase=8 , _UpperCAmelCase=0.0 , _UpperCAmelCase=True , _UpperCAmelCase="relu" , _UpperCAmelCase=2_56 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1.0 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase="sine" , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase=True , _UpperCAmelCase=3_00 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=1 , _UpperCAmelCase=5 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=1 , _UpperCAmelCase=5 , _UpperCAmelCase=2 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.25 , **_UpperCAmelCase , ):
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
snake_case_ = CONFIG_MAPPING["resnet"](out_features=['''stage2''', '''stage3''', '''stage4'''] )
else:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = backbone_config.pop('''model_type''' )
snake_case_ = CONFIG_MAPPING[backbone_model_type]
snake_case_ = config_class.from_dict(_UpperCAmelCase )
snake_case_ = backbone_config
snake_case_ = num_queries
snake_case_ = max_position_embeddings
snake_case_ = d_model
snake_case_ = encoder_ffn_dim
snake_case_ = encoder_layers
snake_case_ = encoder_attention_heads
snake_case_ = decoder_ffn_dim
snake_case_ = decoder_layers
snake_case_ = decoder_attention_heads
snake_case_ = dropout
snake_case_ = attention_dropout
snake_case_ = activation_dropout
snake_case_ = activation_function
snake_case_ = init_std
snake_case_ = init_xavier_std
snake_case_ = encoder_layerdrop
snake_case_ = auxiliary_loss
snake_case_ = position_embedding_type
# deformable attributes
snake_case_ = num_feature_levels
snake_case_ = encoder_n_points
snake_case_ = decoder_n_points
snake_case_ = two_stage
snake_case_ = two_stage_num_proposals
snake_case_ = with_box_refine
snake_case_ = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
snake_case_ = class_cost
snake_case_ = bbox_cost
snake_case_ = giou_cost
# Loss coefficients
snake_case_ = mask_loss_coefficient
snake_case_ = dice_loss_coefficient
snake_case_ = bbox_loss_coefficient
snake_case_ = giou_loss_coefficient
snake_case_ = eos_coefficient
snake_case_ = focal_alpha
super().__init__(is_encoder_decoder=_UpperCAmelCase , **_UpperCAmelCase )
@property
def UpperCamelCase__ ( self ):
return self.encoder_attention_heads
@property
def UpperCamelCase__ ( self ):
return self.d_model
def UpperCamelCase__ ( self ):
snake_case_ = copy.deepcopy(self.__dict__ )
snake_case_ = self.backbone_config.to_dict()
snake_case_ = self.__class__.model_type
return output
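The backbone handling above accepts either nothing (falling back to a default ResNet) or a dict describing another backbone. A hedged sketch, assuming the conventional class name `DetaConfig` for the obfuscated definition above:

from transformers import DetaConfig  # assumed name for the config class above

cfg_default = DetaConfig()  # ResNet backbone on stages 2-4, per the fallback branch above
cfg_custom = DetaConfig(
    backbone_config={"model_type": "resnet", "out_features": ["stage3", "stage4"]}
)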
| 370
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
UpperCAmelCase = False
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase__ ( self ):
return 12
@property
def UpperCamelCase__ ( self ):
return 12
@property
def UpperCamelCase__ ( self ):
return 32
@property
def UpperCamelCase__ ( self ):
torch.manual_seed(0 )
snake_case_ = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def UpperCamelCase__ ( self ):
snake_case_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def UpperCamelCase__ ( self ):
torch.manual_seed(0 )
snake_case_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(_UpperCAmelCase )
@property
def UpperCamelCase__ ( self ):
torch.manual_seed(0 )
snake_case_ = 12
snake_case_ = 12
snake_case_ = {
'''attention_bias''': True,
'''cross_attention_dim''': 32,
'''attention_head_dim''': height * width,
'''num_attention_heads''': 1,
'''num_vector_embeds''': self.num_embed,
'''num_embeds_ada_norm''': self.num_embeds_ada_norm,
'''norm_num_groups''': 32,
'''sample_size''': width,
'''activation_fn''': '''geglu-approximate''',
}
snake_case_ = TransformeraDModel(**_UpperCAmelCase )
return model
def UpperCamelCase__ ( self ):
snake_case_ = '''cpu'''
snake_case_ = self.dummy_vqvae
snake_case_ = self.dummy_text_encoder
snake_case_ = self.dummy_tokenizer
snake_case_ = self.dummy_transformer
snake_case_ = VQDiffusionScheduler(self.num_embed )
snake_case_ = LearnedClassifierFreeSamplingEmbeddings(learnable=_UpperCAmelCase )
snake_case_ = VQDiffusionPipeline(
vqvae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , transformer=_UpperCAmelCase , scheduler=_UpperCAmelCase , learned_classifier_free_sampling_embeddings=_UpperCAmelCase , )
snake_case_ = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
snake_case_ = '''teddy bear playing in the pool'''
snake_case_ = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
snake_case_ = pipe([prompt] , generator=_UpperCAmelCase , num_inference_steps=2 , output_type='''np''' )
snake_case_ = output.images
snake_case_ = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
snake_case_ = pipe(
[prompt] , generator=_UpperCAmelCase , output_type='''np''' , return_dict=_UpperCAmelCase , num_inference_steps=2 )[0]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case_ = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase__ ( self ):
snake_case_ = '''cpu'''
snake_case_ = self.dummy_vqvae
snake_case_ = self.dummy_text_encoder
snake_case_ = self.dummy_tokenizer
snake_case_ = self.dummy_transformer
snake_case_ = VQDiffusionScheduler(self.num_embed )
snake_case_ = LearnedClassifierFreeSamplingEmbeddings(
learnable=_UpperCAmelCase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
snake_case_ = VQDiffusionPipeline(
vqvae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , transformer=_UpperCAmelCase , scheduler=_UpperCAmelCase , learned_classifier_free_sampling_embeddings=_UpperCAmelCase , )
snake_case_ = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
snake_case_ = '''teddy bear playing in the pool'''
snake_case_ = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
snake_case_ = pipe([prompt] , generator=_UpperCAmelCase , num_inference_steps=2 , output_type='''np''' )
snake_case_ = output.images
snake_case_ = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
snake_case_ = pipe(
[prompt] , generator=_UpperCAmelCase , output_type='''np''' , return_dict=_UpperCAmelCase , num_inference_steps=2 )[0]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case_ = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ):
snake_case_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' )
snake_case_ = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' )
snake_case_ = pipeline.to(_UpperCAmelCase )
pipeline.set_progress_bar_config(disable=_UpperCAmelCase )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
snake_case_ = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
snake_case_ = pipeline(
'''teddy bear playing in the pool''' , num_images_per_prompt=1 , generator=_UpperCAmelCase , output_type='''np''' , )
snake_case_ = output.images[0]
assert image.shape == (2_56, 2_56, 3)
assert np.abs(expected_image - image ).max() < 2.0
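For orientation, a hedged usage sketch of the pipeline these tests exercise, with the same checkpoint as above (step count and device are illustrative):

import torch
from diffusers import VQDiffusionPipeline

pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
pipe = pipe.to("cuda")  # gumbel softmax sampling expects a GPU generator, per the note above
generator = torch.Generator(device="cuda").manual_seed(0)
image = pipe("teddy bear playing in the pool", generator=generator, output_type="np").images[0]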
| 267
| 0
|
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def _a ( lowerCamelCase: Tuple ) -> List[str]: # picklable for multiprocessing
'''simple docstring'''
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def _a ( ) -> List[Any]:
'''simple docstring'''
with parallel_backend('''spark''' ):
assert ParallelBackendConfig.backend_name == "spark"
__A = [1, 2, 3]
with pytest.raises(lowerCamelCase ):
with parallel_backend('''unsupported backend''' ):
map_nested(lowerCamelCase , lowerCamelCase , num_proc=2 )
with pytest.raises(lowerCamelCase ):
with parallel_backend('''unsupported backend''' ):
map_nested(lowerCamelCase , lowerCamelCase , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('''num_proc''' , [2, -1] )
def _a ( lowerCamelCase: Dict ) -> List[Any]:
'''simple docstring'''
__A = [1, 2]
__A = {'''a''': 1, '''b''': 2}
__A = {'''a''': [1, 2], '''b''': [3, 4]}
__A = {'''a''': {'''1''': 1}, '''b''': 2}
__A = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
__A = [2, 3]
__A = {'''a''': 2, '''b''': 3}
__A = {'''a''': [2, 3], '''b''': [4, 5]}
__A = {'''a''': {'''1''': 2}, '''b''': 3}
__A = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
with parallel_backend('''spark''' ):
assert map_nested(lowerCamelCase , lowerCamelCase , num_proc=lowerCamelCase ) == expected_map_nested_sa
assert map_nested(lowerCamelCase , lowerCamelCase , num_proc=lowerCamelCase ) == expected_map_nested_sa
assert map_nested(lowerCamelCase , lowerCamelCase , num_proc=lowerCamelCase ) == expected_map_nested_sa
assert map_nested(lowerCamelCase , lowerCamelCase , num_proc=lowerCamelCase ) == expected_map_nested_sa
assert map_nested(lowerCamelCase , lowerCamelCase , num_proc=lowerCamelCase ) == expected_map_nested_sa
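A minimal sketch of the API under test above (requires joblibspark and a Spark session in practice):

from datasets.parallel import parallel_backend
from datasets.utils.py_utils import map_nested

def add_one(i):
    return i + 1

with parallel_backend("spark"):
    # num_proc maps to Spark parallelism inside the context manager.
    result = map_nested(add_one, {"a": [1, 2], "b": [3, 4]}, num_proc=2)
# result == {"a": [2, 3], "b": [4, 5]}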
| 117
|
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
snake_case__ : Optional[int] = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
snake_case__ : Optional[int] = parser.parse_args()
if args.model_type == "bert":
snake_case__ : Dict = BertForMaskedLM.from_pretrained(args.model_name)
snake_case__ : Union[str, Any] = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
snake_case__ : Optional[int] = model.state_dict()
snake_case__ : List[Any] = {}
for w in ["word_embeddings", "position_embeddings"]:
snake_case__ : Tuple = state_dict[f'{prefix}.embeddings.{w}.weight']
for w in ["weight", "bias"]:
snake_case__ : Optional[Any] = state_dict[f'{prefix}.embeddings.LayerNorm.{w}']
snake_case__ : int = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
snake_case__ : Union[str, Any] = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'
]
snake_case__ : Dict = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'
]
snake_case__ : int = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'
]
snake_case__ : int = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'
]
snake_case__ : Optional[int] = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'
]
snake_case__ : Optional[Any] = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'
]
snake_case__ : List[str] = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'
]
snake_case__ : int = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'
]
std_idx += 1
snake_case__ : Optional[int] = state_dict['cls.predictions.decoder.weight']
snake_case__ : str = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
snake_case__ : int = state_dict[f'cls.predictions.transform.dense.{w}']
snake_case__ : Optional[int] = state_dict[f'cls.predictions.transform.LayerNorm.{w}']
print(f'N layers selected for distillation: {std_idx}')
print(f'Number of params transferred for distillation: {len(compressed_sd.keys())}')
print(f'Save transferred checkpoint to {args.dump_checkpoint}.')
torch.save(compressed_sd, args.dump_checkpoint)
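The extraction above keeps six of the twelve teacher layers for the distilled student; a small sketch of the index mapping it induces:

# Teacher layers [0, 2, 4, 7, 9, 11] become student layers 0..5.
teacher_layers = [0, 2, 4, 7, 9, 11]
teacher_to_student = {t: s for s, t in enumerate(teacher_layers)}
# e.g. teacher layer 7 -> student layer 3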
| 117
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = '''▁'''
lowerCamelCase : Dict = {'''vocab_file''': '''sentencepiece.bpe.model'''}
lowerCamelCase : int = {
'''vocab_file''': {
'''facebook/mbart-large-50-one-to-many-mmt''': (
'''https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'''
),
}
}
lowerCamelCase : Optional[Any] = {
'''facebook/mbart-large-50-one-to-many-mmt''': 10_24,
}
# fmt: off
lowerCamelCase : List[Any] = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''', '''af_ZA''', '''az_AZ''', '''bn_IN''', '''fa_IR''', '''he_IL''', '''hr_HR''', '''id_ID''', '''ka_GE''', '''km_KH''', '''mk_MK''', '''ml_IN''', '''mn_MN''', '''mr_IN''', '''pl_PL''', '''ps_AF''', '''pt_XX''', '''sv_SE''', '''sw_KE''', '''ta_IN''', '''te_IN''', '''th_TH''', '''tl_XX''', '''uk_UA''', '''ur_PK''', '''xh_ZA''', '''gl_ES''', '''sl_SI''']
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : List[str] = VOCAB_FILES_NAMES
_A : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : List[str] = PRETRAINED_VOCAB_FILES_MAP
_A : Tuple = ['''input_ids''', '''attention_mask''']
_A : List[int] = []
_A : List[int] = []
def __init__( self : List[Any] , __a : Dict , __a : List[str]=None , __a : Union[str, Any]=None , __a : List[str]="</s>" , __a : List[Any]="</s>" , __a : str="<s>" , __a : str="<unk>" , __a : Tuple="<pad>" , __a : Optional[int]="<mask>" , __a : Optional[Dict[str, Any]] = None , **__a : Union[str, Any] , ) -> None:
"""simple docstring"""
__lowercase : Any = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
__lowercase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
__lowercase : Any = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__a , tgt_lang=__a , eos_token=__a , unk_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
__lowercase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__a ) )
__lowercase : int = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
__lowercase : Union[str, Any] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__lowercase : Optional[Any] = 1
__lowercase : str = len(self.sp_model )
__lowercase : int = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__a )
}
__lowercase : Optional[Any] = {v: k for k, v in self.lang_code_to_id.items()}
__lowercase : Optional[Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
__lowercase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
__lowercase : List[Any] = src_lang if src_lang is not None else """en_XX"""
__lowercase : str = self.lang_code_to_id[self._src_lang]
__lowercase : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def lowerCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def lowerCAmelCase ( self : str , __a : str ) -> None:
"""simple docstring"""
__lowercase : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase : str = self.__dict__.copy()
__lowercase : Dict = None
return state
def __setstate__( self : List[str] , __a : Dict ) -> None:
"""simple docstring"""
__lowercase : str = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__lowercase : Any = {}
__lowercase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase : int = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCAmelCase ( self : Optional[Any] , __a : str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(__a , out_type=__a )
def lowerCAmelCase ( self : Any , __a : str ) -> int:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__lowercase : Dict = self.sp_model.PieceToId(__a )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCAmelCase ( self : Dict , __a : int ) -> str:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCAmelCase ( self : str , __a : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase : List[str] = []
__lowercase : Tuple = """"""
__lowercase : Union[str, Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__a ) + token
__lowercase : Optional[int] = True
__lowercase : Union[str, Any] = []
else:
current_sub_tokens.append(__a )
__lowercase : int = False
out_string += self.sp_model.decode(__a )
return out_string.strip()
def lowerCAmelCase ( self : str , __a : str , __a : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(__a ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
__lowercase : List[str] = os.path.join(
__a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __a )
elif not os.path.isfile(self.vocab_file ):
with open(__a , """wb""" ) as fi:
__lowercase : Any = self.sp_model.serialized_model_proto()
fi.write(__a )
return (out_vocab_file,)
def lowerCAmelCase ( self : List[str] , __a : List[int] , __a : Optional[List[int]] = None , __a : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
__lowercase : Any = [1] * len(self.prefix_tokens )
__lowercase : Optional[int] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__a )) + suffix_ones
return prefix_ones + ([0] * len(__a )) + ([0] * len(__a )) + suffix_ones
def lowerCAmelCase ( self : int , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCAmelCase ( self : List[Any] , __a : Union[str, Any] , __a : str , __a : Optional[str] , __a : Optional[str] , **__a : Dict ) -> List[str]:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
__lowercase : List[str] = src_lang
__lowercase : Optional[Any] = self(__a , add_special_tokens=__a , return_tensors=__a , **__a )
__lowercase : Any = self.convert_tokens_to_ids(__a )
__lowercase : Dict = tgt_lang_id
return inputs
def lowerCAmelCase ( self : Optional[Any] , __a : List[str] , __a : str = "en_XX" , __a : Optional[List[str]] = None , __a : str = "ro_RO" , **__a : Optional[Any] , ) -> BatchEncoding:
"""simple docstring"""
__lowercase : Dict = src_lang
__lowercase : Optional[Any] = tgt_lang
return super().prepare_seqaseq_batch(__a , __a , **__a )
def lowerCAmelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCAmelCase ( self : Tuple , __a : str ) -> None:
"""simple docstring"""
__lowercase : int = self.lang_code_to_id[src_lang]
__lowercase : int = [self.cur_lang_code_id]
__lowercase : int = [self.eos_token_id]
def lowerCAmelCase ( self : Optional[int] , __a : str ) -> None:
"""simple docstring"""
__lowercase : Union[str, Any] = self.lang_code_to_id[tgt_lang]
__lowercase : Tuple = [self.cur_lang_code_id]
__lowercase : str = [self.eos_token_id]
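A sketch of the fairseq/SentencePiece id alignment the tokenizer above maintains, following the comment table and offset logic in its _convert_token_to_id path (constant names here are illustrative):

FAIRSEQ_SPECIALS = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
FAIRSEQ_OFFSET = 1

def spm_id_to_fairseq_id(spm_id: int) -> int:
    # SentencePiece returns 0 for unknown pieces, which must map to the fairseq <unk> id.
    return spm_id + FAIRSEQ_OFFSET if spm_id else FAIRSEQ_SPECIALS["<unk>"]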
| 370
|
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
lowerCamelCase : str = trt.Logger(trt.Logger.WARNING)
lowerCamelCase : Any = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
lowerCamelCase : Optional[Any] = logging.getLogger(__name__)
lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=3_84,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=1_28,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
    '''--preprocessing_num_workers''', type=int, default=4, help='''The number of processes to use for preprocessing.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = '''temp_engine/bert-fp32.engine'''
if args.fp16:
    engine_name = '''temp_engine/bert-fp16.engine'''
if args.int8:
    engine_name = '''temp_engine/bert-int8.engine'''
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
EXPLICIT_BATCH = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
    config.max_workspace_size = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fp16:
    config.set_flag(trt.BuilderFlag.FP16)
if args.int8:
    config.set_flag(trt.BuilderFlag.INT8)
profile = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs to device memory asynchronously
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)

    # start time
    start_time = time.time()

    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)

    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets['''validation'''].column_names
question_column_name = '''question''' if '''question''' in column_names else column_names[0]
context_column_name = '''context''' if '''context''' in column_names else column_names[1]
answer_column_name = '''answers''' if '''answers''' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == '''right'''
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
        f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the '''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
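# Illustrative sketch (sizes are hypothetical): with max_seq_length=384 and doc_stride=128, an
# example whose question+context tokenizes to ~600 tokens yields two overlapping features, and
# overflow_to_sample_mapping would read e.g. [0, 0, 1, ...], i.e. both features map back to
# example 0 -- which is exactly the link that "example_id" records above.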
eval_examples = raw_datasets['''validation''']
# Validation Feature Creation
eval_dataset = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='''Running tokenizer on validation dataset''',
)
data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(['''example_id''', '''offset_mapping'''])
eval_dataloader = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
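# Sketch of the structures handed to the metric (ids and answers are made up):
#   prediction.predictions -> [{"id": "5733be28...", "prediction_text": "Denver Broncos"}]
#   prediction.label_ids   -> [{"id": "5733be28...", "answers": {"text": ["Denver Broncos"], "answer_start": [177]}}]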
metric = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inference
for i in range(len(input_names)):
    context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def binding_nbytes(binding):
    return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
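# For intuition (hypothetical binding): a binding of shape (8, 384) with an int32 dtype needs
# 8 * 384 * 4 = 12288 bytes of device memory.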
# Allocate device memory for inputs and outputs.
d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
d_output0 = cuda.mem_alloc(h_output0.nbytes)
d_output1 = cuda.mem_alloc(h_output1.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
stream = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
total_time = 0.0
niter = 0
start_time = timeit.default_timer()
all_preds = None
for step, batch in enumerate(eval_dataloader):
    outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
    total_time += infer_time
    niter += 1
    start_logits, end_logits = outputs
    start_logits = torch.tensor(start_logits)
    end_logits = torch.tensor(end_logits)
    # necessary to pad predictions and labels for being gathered
    start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
    end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
    logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
    all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
    all_preds = nested_truncate(all_preds, len(eval_dataset))
evalTime = timeit.default_timer() - start_time
logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 1000 / niter))
logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 1000))
logger.info('''Total Number of Inference = %d''', niter)
prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'''Evaluation metrics: {eval_metric}''')
| 306
| 0
|
ROMAN = [
(1000, '''M'''),
(900, '''CM'''),
(500, '''D'''),
(400, '''CD'''),
(100, '''C'''),
(90, '''XC'''),
(50, '''L'''),
(40, '''XL'''),
(10, '''X'''),
(9, '''IX'''),
(5, '''V'''),
(4, '''IV'''),
(1, '''I'''),
]
def roman_to_int(roman: str) -> int:
    """Convert a Roman numeral string to an integer."""
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Convert an integer to a Roman numeral string."""
    result = []
    for arabic, roman in ROMAN:
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 219
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the launcher's own arguments plus everything destined for the training script."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()
def main():
    """Import the training script as a module and spawn it on all requested TPU cores."""
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv so the training script sees the expected arguments.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
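# Usage sketch (script name and flags illustrative): the wrapped training script must expose an
# `_mp_fn(index)` entry point for xmp.spawn, and would be launched as e.g.
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...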
| 219
| 1
|
'''simple docstring'''
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('''test''')
    else:
        parser = argparse.ArgumentParser('''Accelerate test command''')

    parser.add_argument(
        '''--config_file''',
        default=None,
        help=(
            '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
            '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
            '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
            '''with \'huggingface\'.'''
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser
def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = F"--config_file={args.config_file} {script_name}"

    cmd = ['''accelerate-launch'''] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print('''Test is a success! You are ready for your distributed training!''')
def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
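# Usage sketch (assuming the standard accelerate CLI wiring): `accelerate test`, optionally with
# `--config_file path/to/config.yaml`, runs the bundled test_script.py via accelerate-launch to
# verify that the distributed environment is configured correctly.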
| 353
|
'''simple docstring'''
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = 'src/transformers'

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    'transformers',
    os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r'\[(.+?)\]\((https://huggingface\.co/.+?)\)')
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
'CLIPConfigMixin',
'DecisionTransformerConfigMixin',
'EncoderDecoderConfigMixin',
'RagConfigMixin',
'SpeechEncoderDecoderConfigMixin',
'VisionEncoderDecoderConfigMixin',
'VisionTextDualEncoderConfigMixin',
}
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = F"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = '''\n'''.join(sorted(configs_without_checkpoint))
        raise ValueError(F"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
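# Example of what _re_checkpoint extracts (illustrative): from a config docstring containing
# "[bert-base-uncased](https://huggingface.co/bert-base-uncased)" it yields the tuple
# ("bert-base-uncased", "https://huggingface.co/bert-base-uncased"); the check passes because
# the link equals f"https://huggingface.co/{ckpt_name}".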
| 72
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_mbart''': ['''MBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MBartConfig''', '''MBartOnnxConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_mbart'''] = ['''MBartTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_mbart_fast'''] = ['''MBartTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mbart'''] = [
'''MBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MBartForCausalLM''',
'''MBartForConditionalGeneration''',
'''MBartForQuestionAnswering''',
'''MBartForSequenceClassification''',
'''MBartModel''',
'''MBartPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_mbart'''] = [
'''TFMBartForConditionalGeneration''',
'''TFMBartModel''',
'''TFMBartPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_mbart'''] = [
'''FlaxMBartForConditionalGeneration''',
'''FlaxMBartForQuestionAnswering''',
'''FlaxMBartForSequenceClassification''',
'''FlaxMBartModel''',
'''FlaxMBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 87
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'''},
    '''tokenizer_file''': {
        '''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'''
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''mobilebert-uncased''': 512}

PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" MobileBERT tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
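    # Illustrative output (token counts are made up): for sequences A and B the mask covers
    # "[CLS] A [SEP]" with 0s and "B [SEP]" with 1s, e.g. [0, 0, 0, 0, 1, 1, 1].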
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 159
| 0
|
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
IMAGENET_64_UNET_CONFIG = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
LSUN_256_UNET_CONFIG = {
'''sample_size''': 2_56,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 2_01,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 1_51,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError('boolean value expected')
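# Quick examples (not in the original script): str2bool("yes") -> True, str2bool("0") -> False,
# str2bool(True) -> True; any other string raises argparse.ArgumentTypeError.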
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f'{new_prefix}.norm1.weight'] = checkpoint[f'{old_prefix}.in_layers.0.weight']
    new_checkpoint[f'{new_prefix}.norm1.bias'] = checkpoint[f'{old_prefix}.in_layers.0.bias']
    new_checkpoint[f'{new_prefix}.conv1.weight'] = checkpoint[f'{old_prefix}.in_layers.2.weight']
    new_checkpoint[f'{new_prefix}.conv1.bias'] = checkpoint[f'{old_prefix}.in_layers.2.bias']
    new_checkpoint[f'{new_prefix}.time_emb_proj.weight'] = checkpoint[f'{old_prefix}.emb_layers.1.weight']
    new_checkpoint[f'{new_prefix}.time_emb_proj.bias'] = checkpoint[f'{old_prefix}.emb_layers.1.bias']
    new_checkpoint[f'{new_prefix}.norm2.weight'] = checkpoint[f'{old_prefix}.out_layers.0.weight']
    new_checkpoint[f'{new_prefix}.norm2.bias'] = checkpoint[f'{old_prefix}.out_layers.0.bias']
    new_checkpoint[f'{new_prefix}.conv2.weight'] = checkpoint[f'{old_prefix}.out_layers.3.weight']
    new_checkpoint[f'{new_prefix}.conv2.bias'] = checkpoint[f'{old_prefix}.out_layers.3.bias']
    if has_skip:
        new_checkpoint[f'{new_prefix}.conv_shortcut.weight'] = checkpoint[f'{old_prefix}.skip_connection.weight']
        new_checkpoint[f'{new_prefix}.conv_shortcut.bias'] = checkpoint[f'{old_prefix}.skip_connection.bias']
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f'{old_prefix}.qkv.weight'].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f'{old_prefix}.qkv.bias'].chunk(3, dim=0)
    new_checkpoint[f'{new_prefix}.group_norm.weight'] = checkpoint[f'{old_prefix}.norm.weight']
    new_checkpoint[f'{new_prefix}.group_norm.bias'] = checkpoint[f'{old_prefix}.norm.bias']
    new_checkpoint[f'{new_prefix}.to_q.weight'] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f'{new_prefix}.to_q.bias'] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f'{new_prefix}.to_k.weight'] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f'{new_prefix}.to_k.bias'] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f'{new_prefix}.to_v.weight'] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f'{new_prefix}.to_v.bias'] = bias_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f'{new_prefix}.to_out.0.weight'] = (
        checkpoint[f'{old_prefix}.proj_out.weight'].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f'{new_prefix}.to_out.0.bias'] = checkpoint[f'{old_prefix}.proj_out.bias'].squeeze(-1).squeeze(-1)
    return new_checkpoint
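# Shape note (for intuition): the original checkpoint stores qkv as one fused 1x1 conv of shape
# (3*C, C, 1, 1); chunk(3, dim=0) splits it into q/k/v, and the double squeeze(-1) turns each
# (C, C, 1, 1) kernel into the (C, C) linear weight that diffusers attention layers expect.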
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    new_checkpoint = {}

    new_checkpoint['time_embedding.linear_1.weight'] = checkpoint['time_embed.0.weight']
    new_checkpoint['time_embedding.linear_1.bias'] = checkpoint['time_embed.0.bias']
    new_checkpoint['time_embedding.linear_2.weight'] = checkpoint['time_embed.2.weight']
    new_checkpoint['time_embedding.linear_2.bias'] = checkpoint['time_embed.2.bias']

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint['class_embedding.weight'] = checkpoint['label_emb.weight']

    new_checkpoint['conv_in.weight'] = checkpoint['input_blocks.0.0.weight']
    new_checkpoint['conv_in.bias'] = checkpoint['input_blocks.0.0.bias']

    down_block_types = unet_config['down_block_types']
    layers_per_block = unet_config['layers_per_block']
    attention_head_dim = unet_config['attention_head_dim']
    channels_list = unet_config['block_out_channels']
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f'down_blocks.{i}.resnets.{j}'
                old_prefix = f'input_blocks.{current_layer}.0'
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f'down_blocks.{i}.resnets.{j}'
                old_prefix = f'input_blocks.{current_layer}.0'
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f'down_blocks.{i}.attentions.{j}'
                old_prefix = f'input_blocks.{current_layer}.1'
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f'down_blocks.{i}.downsamplers.0'
            old_prefix = f'input_blocks.{current_layer}.0'
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = 'mid_block.resnets.0'
    old_prefix = 'middle_block.0'
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = 'mid_block.attentions.0'
    old_prefix = 'middle_block.1'
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = 'mid_block.resnets.1'
    old_prefix = 'middle_block.2'
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config['up_block_types']

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f'up_blocks.{i}.resnets.{j}'
                old_prefix = f'output_blocks.{current_layer}.0'
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f'up_blocks.{i}.upsamplers.0'
                old_prefix = f'output_blocks.{current_layer-1}.1'
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f'up_blocks.{i}.resnets.{j}'
                old_prefix = f'output_blocks.{current_layer}.0'
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f'up_blocks.{i}.attentions.{j}'
                old_prefix = f'output_blocks.{current_layer}.1'
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f'up_blocks.{i}.upsamplers.0'
                old_prefix = f'output_blocks.{current_layer-1}.2'
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint['conv_norm_out.weight'] = checkpoint['out.0.weight']
    new_checkpoint['conv_norm_out.bias'] = checkpoint['out.0.bias']
    new_checkpoint['conv_out.weight'] = checkpoint['out.2.weight']
    new_checkpoint['conv_out.bias'] = checkpoint['out.2.bias']
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
    args = parser.parse_args()
    args.class_cond = str2bool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
print(f"""Checkpoint: {ckpt_name}""")
# Get U-Net config
if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
else:
raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""")
    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""")
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
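# Usage sketch (file names illustrative):
#   python convert_consistency_to_diffusers.py --unet_path cd_imagenet64_l2.pt \
#       --dump_path ./cd_imagenet64_l2 --class_cond True
# Note that the checkpoint file name ("cd"/"ct", "imagenet64", "256", "test") selects both the
# UNet config and the scheduler config above.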
| 109
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_clipseg'''] = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 109
| 1
|
"""simple docstring"""
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
A : Optional[int] = "sshleifer/mar_enro_6_3_student"
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def snake_case ( self ):
super().setUp()
__lowerCAmelCase = cached_path(
"https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz" , extract_compressed_file=__a , )
__lowerCAmelCase = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
    @slow
    @require_torch_gpu
    def test_model_download(self):
        """Warm up the cache so the next test doesn't include download time."""
        MarianMTModel.from_pretrained(MARIAN_MODEL)
    @slow
    @require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            "$MAX_LEN": 64,
            "$BS": 64,
            "$GAS": 1,
            "$ENRO_DIR": self.data_dir,
            "facebook/mbart-large-cc25": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            "--learning_rate=3e-5": "--learning_rate 3e-4",
            "--num_train_epochs 6": "--num_train_epochs 1",
        }

        # Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)
        self.assertGreater(last_step_stats["val_avg_gen_time"], 0.01)
        # model hanging on generate. Maybe bad config was saved.
        self.assertLessEqual(last_step_stats["val_avg_gen_time"], 1.0)

        # test learning requirements:
        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"], 2)
        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats["val_avg_bleu"], 17)
        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"]), 1.1)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"][expected_key].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            "--fp16_opt_level=O1": "",
            "$MAX_LEN": 128,
            "$BS": 16,
            "$GAS": 1,
            "$ENRO_DIR": data_dir,
            "$m": "sshleifer/student_marian_en_ro_6_1",
            "val_check_interval=0.25": "val_check_interval=1.0",
        }

        # Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")

        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
            ["distillation.py"]
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                "--gpus=1",
                "--learning_rate=1e-3",
                f"--num_train_epochs={epochs}",
                "--warmup_steps=10",
                "--val_check_interval=1.0",
                "--do_predict",
            ]
        )
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        assert len(metrics["val"]) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check
        assert last_step_stats["val_avg_gen_time"] >= 0.01
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"][expected_key].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
| 57
|
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a list of lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2_000, feature_size=10, hop_length=160, chunk_length=8, padding_value=0.0, sampling_rate=4_000, return_attention_mask=False, do_normalize=True):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, """feat_extract.json""")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="""max_length""", return_tensors="""np""").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="""np""").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="""np""").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1E-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="""np""").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="""np""").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1E-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="""np""").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="""np""").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1E-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="""np""").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="""np""").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1E-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"""input_features""": inputs}], return_tensors="""np""")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"""input_features""": inputs}], return_tensors="""pt""")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("""hf-internal-testing/librispeech_asr_dummy""", """clean""", split="""validation""")
        # automatic decoding with librispeech
        speech_samples = ds.sort("""id""").select(range(num_samples))[:num_samples]["""audio"""]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="""pt""").input_features
        self.assertEqual(input_features.shape, (1, 80, 3_000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1E-4))
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535  # Rescale to [0, 65535] to show issue

        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1E-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1E-3))
| 267
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_perceiver'] = ['PerceiverFeatureExtractor']
    _import_structure['image_processing_perceiver'] = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_perceiver'] = [
        'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PerceiverForImageClassificationConvProcessing',
        'PerceiverForImageClassificationFourier',
        'PerceiverForImageClassificationLearned',
        'PerceiverForMaskedLM',
        'PerceiverForMultimodalAutoencoding',
        'PerceiverForOpticalFlow',
        'PerceiverForSequenceClassification',
        'PerceiverLayer',
        'PerceiverModel',
        'PerceiverPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 86
|
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
    )
default_cache_path = os.path.join(torch_cache_home, 'transformers')
CLOUDFRONT_DISTRIB_PREFIX = 'https://cdn.huggingface.co'
S3_BUCKET_PREFIX = 'https://s3.amazonaws.com/models.huggingface.co/bert'
PATH = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
CONFIG = os.path.join(PATH, 'config.yaml')
ATTRIBUTES = os.path.join(PATH, 'attributes.txt')
OBJECTS = os.path.join(PATH, 'objects.txt')
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = 'pytorch_model.bin'
CONFIG_NAME = 'config.yaml'
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())
    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)
        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level-1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
            self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file = Config.load_yaml(resolved_config_file)
        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if not x])/len(n1.flatten())*100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")
# Hugging face functions below
def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")
def hf_bucket_url(model_id, filename, use_cdn=True):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
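
# Illustrative only (hypothetical model ids): a flat id such as "bert-base"
# uses the legacy layout "<endpoint>/bert-base-<filename>", while a namespaced
# id such as "user/model" maps to "<endpoint>/user/model/<filename>".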
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1_024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )
            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )
        os.replace(temp_file.name, cache_path)
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)
    return cache_path
def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
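
# Illustrative only (hypothetical values): url_to_filename("https://x/m.bin",
# etag='"abc"') returns "<sha256(url)>.<sha256(etag)>", so a changed ETag
# produces a new cache entry instead of overwriting the previous download.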
def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))
        return output_path_extracted
    return output_path
def get_data(query, delim=","):
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
            try:
                data = eval(data)
            except Exception:
                data = data.split("\n")
        req.close()
    return data
def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new
def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")
def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
    assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
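
# Example (hypothetical data): list(chunk([1, 2, 3, 4, 5], batch=2)) yields
# [[1, 2], [3, 4], [5]]; the generator produces one slice per iteration, so
# memory stays flat even for long image lists.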
| 86
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }
        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
| 299
|
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 306
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
UpperCamelCase : int = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
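
# Worked example (an illustration using the defaults above): embed_dim=64 with
# len(depths)=4 stages gives hidden_size = 64 * 2 ** 3 = 512, the channel
# width after the last stage.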
| 357
|
'''simple docstring'''
def different_signs(num1: int, num2: int) -> bool:
    """
    Return True if num1 and num2 have opposite signs.

    >>> different_signs(1, -1)
    True
    >>> different_signs(1, 1)
    False
    """
    return num1 ^ num2 < 0
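
# Why XOR works here: with two's-complement integers the sign is carried by
# the top bit, so num1 ^ num2 is negative exactly when the operands have
# opposite signs; Python's arbitrary-precision ints preserve this behaviour.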
if __name__ == "__main__":
import doctest
doctest.testmod()
| 345
| 0
|
'''simple docstring'''
import unittest
import numpy as np
def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )

    return mat_c - mat_b.T @ a_inv @ mat_b
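
# The tests below rely on the block-determinant identity: for the symmetric
# block matrix M = [[A, B], [B^T, C]], det(M) = det(A) * det(S), where
# S = C - B^T A^{-1} B is exactly the value returned above.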
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 141
|
"""simple docstring"""
demo_graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def bfs_shortest_path(graph: dict, start, goal) -> list:
    """Find the shortest path between `start` and `goal` nodes."""
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Find the shortest path distance between `start` and `target` nodes."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
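    # Both traversals visit each vertex and edge at most once, giving O(V + E)
    # time; swapping list.pop(0) for collections.deque.popleft() (an assumption,
    # not part of the original) would remove the O(n) cost of dequeuing.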
| 72
| 0
|
"""simple docstring"""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
lowerCAmelCase : int = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)
            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
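
# A minimal standalone sketch (assumptions: local path, utf-8 input, no
# pyarrow) of the chunking pattern used above: read a fixed-size block, then
# extend it to the next newline so no JSON record is split across batches.
def _iter_json_line_batches(path, chunksize=10 << 20):
    import json as _json

    with open(path, "rb") as f:
        while True:
            batch = f.read(chunksize)
            if not batch:
                break
            batch += f.readline()  # finish the current line
            yield [_json.loads(line) for line in batch.splitlines() if line.strip()]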
| 353
|
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 168
| 0
|
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
@require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
@require_torch
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = torch.tensor(
[-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
-1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
-1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
-0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
# fmt: on
UpperCAmelCase : int = self._load_datasamples(1 )
UpperCAmelCase : Any = ASTFeatureExtractor()
UpperCAmelCase : Union[str, Any] = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).input_values
self.assertEqual(input_values.shape , (1, 1024, 128) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
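# A stand-alone sketch of the extractor exercised above, assuming only that the
# `transformers` package is installed; the 440 Hz sine wave is an illustrative
# stand-in for real 16 kHz speech.
import numpy as np
from transformers import ASTFeatureExtractor

waveform = np.sin(2 * np.pi * 440 * np.linspace(0, 1, 16_000)).astype(np.float32)
extractor = ASTFeatureExtractor()
features = extractor(waveform, sampling_rate=16_000, return_tensors="np")
print(features.input_values.shape)  # (1, 1024, 128): padded log-mel patches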
| 109
|
"""simple docstring"""
import datasets
from .evaluate import evaluate
A: Optional[Any] = "\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n"
A: Optional[int] = "\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n"
A: int = "\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the CUAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\n 'aupr': Area Under the Precision-Recall curve\n 'prec_at_80_recall': Precision at 80% recall\n 'prec_at_90_recall': Precision at 90% recall\nExamples:\n >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> cuad_metric = datasets.load_metric(\"cuad\")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
UpperCAmelCase : int = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
UpperCAmelCase : Tuple = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
UpperCAmelCase : Optional[Any] = evaluate(dataset=_SCREAMING_SNAKE_CASE , predictions=_SCREAMING_SNAKE_CASE )
return score
| 109
| 1
|
from __future__ import annotations
import math
__A = '2020.9.26'
__A = 'xcodz-dot, cclaus, dhruvmanila'
def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
if not all(isinstance(_lowercase , (float, int) ) for val in locals().values() ):
_A = f"""Input values must either be float or int: {list(locals().values() )}"""
raise TypeError(_lowercase )
_A = ((x * distance) / (z + distance)) * scale
_A = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ):
raise TypeError('''Axis must be a str''' )
_A = locals()
del input_variables["axis"]
if not all(isinstance(_lowercase , (float, int) ) for val in input_variables.values() ):
_A = (
'''Input values except axis must either be float or int: '''
f"""{list(input_variables.values() )}"""
)
raise TypeError(_lowercase )
_A = (angle % 3_60) / 4_50 * 1_80 / math.pi
if axis == "z":
_A = x * math.cos(_lowercase ) - y * math.sin(_lowercase )
_A = y * math.cos(_lowercase ) + x * math.sin(_lowercase )
_A = z
elif axis == "x":
_A = y * math.cos(_lowercase ) - z * math.sin(_lowercase )
_A = z * math.cos(_lowercase ) + y * math.sin(_lowercase )
_A = x
elif axis == "y":
_A = x * math.cos(_lowercase ) - z * math.sin(_lowercase )
_A = z * math.cos(_lowercase ) + x * math.sin(_lowercase )
_A = y
else:
raise ValueError('''not a valid axis, choose one of \'x\', \'y\', \'z\'''' )
return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }')
print(f'{rotate(1.0, 2.0, 3.0, "y", 90.0) = }')
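# In conventional notation, the perspective projection implemented above maps a
# point (x, y, z) onto the viewing plane at distance d with scale s:
#
#     x' = s * x * d / (z + d),        y' = s * y * d / (z + d)
#
# and rotation about the z-axis is the usual planar rotation:
#
#     x_new = x * cos(theta) - y * sin(theta)
#     y_new = y * cos(theta) + x * sin(theta)
#
# Note that the script scales the angle as (angle % 360) / 450 * 180 / pi rather
# than the standard degrees-to-radians factor pi / 180, so its "90.0" is not a
# quarter turn in radians.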
| 75
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
] )
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __A ( self: Any ) -> Optional[Any]:
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=__A , )
assert hasattr(self , '''env''' )
def __A ( self: Any , __A: Optional[int] ) -> Union[str, Any]:
_A = f"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}"""
# distributed data settings
_A = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=__A , instance_count=__A , instance_type=self.instance_type , debugger_hook_config=__A , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=__A , py_version='''py36''' , )
def __A ( self: Optional[int] , __A: Optional[int] ) -> List[Any]:
TrainingJobAnalytics(__A ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def __A ( self: Dict , __A: Tuple ) -> Union[str, Any]:
# create estimator
_A = self.create_estimator(__A )
# run training
estimator.fit()
# result dataframe
_A = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_A = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
_A = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_A = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_99_99 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , __A )
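# Condensed from the test above: launching one such job directly. A sketch using
# only the estimator arguments that appear in the test; the role ARN and image
# URI are placeholders, and `fit()` needs valid AWS credentials.
from sagemaker.huggingface import HuggingFace

estimator = HuggingFace(
    entry_point="run_glue.py",
    source_dir="./scripts",
    role="arn:aws:iam::<account-id>:role/sagemaker_execution_role",
    image_uri="<huggingface-training-image-uri>",
    instance_count=2,
    instance_type="ml.p3.16xlarge",
    hyperparameters={"model_name_or_path": "distilbert-base-cased"},
    py_version="py36",
)
# estimator.fit()  # starts the actual training job on AWS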
| 75
| 1
|
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class A__ ( _lowerCamelCase):
A_ : Optional[int] = (UnCLIPScheduler,)
def __lowerCamelCase ( self , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = {
'num_train_timesteps': 10_00,
'variance_type': 'fixed_small_log',
'clip_sample': True,
'clip_sample_range': 1.0,
'prediction_type': 'epsilon',
}
config.update(**_SCREAMING_SNAKE_CASE )
return config
def __lowerCamelCase ( self ):
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
for time_step in [0, 5_00, 9_99]:
for prev_timestep in [None, 5, 1_00, 2_50, 5_00, 7_50]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_SCREAMING_SNAKE_CASE , prev_timestep=_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.scheduler_classes[0]
__lowerCAmelCase : Union[str, Any] = self.get_scheduler_config(variance_type='fixed_small_log' )
__lowerCAmelCase : Tuple = scheduler_class(**_SCREAMING_SNAKE_CASE )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.00_00E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.054_9625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.999_4987 ) ) < 1E-5
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
__lowerCAmelCase : Dict = self.get_scheduler_config(variance_type='learned_range' )
__lowerCAmelCase : Union[str, Any] = scheduler_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = 0.5
assert scheduler._get_variance(1 , predicted_variance=_SCREAMING_SNAKE_CASE ) - -10.171_2790 < 1E-5
assert scheduler._get_variance(4_87 , predicted_variance=_SCREAMING_SNAKE_CASE ) - -5.799_8052 < 1E-5
assert scheduler._get_variance(9_99 , predicted_variance=_SCREAMING_SNAKE_CASE ) - -0.001_0011 < 1E-5
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = self.scheduler_classes[0]
__lowerCAmelCase : Optional[int] = self.get_scheduler_config()
__lowerCAmelCase : Union[str, Any] = scheduler_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = scheduler.timesteps
__lowerCAmelCase : Optional[int] = self.dummy_model()
__lowerCAmelCase : int = self.dummy_sample_deter
__lowerCAmelCase : str = torch.manual_seed(0 )
for i, t in enumerate(_SCREAMING_SNAKE_CASE ):
# 1. predict noise residual
__lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
__lowerCAmelCase : int = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample
__lowerCAmelCase : str = pred_prev_sample
__lowerCAmelCase : List[str] = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : List[Any] = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1E-2
assert abs(result_mean.item() - 0.328_4743 ) < 1E-3
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = self.scheduler_classes[0]
__lowerCAmelCase : Any = self.get_scheduler_config()
__lowerCAmelCase : Tuple = scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(25 )
__lowerCAmelCase : int = scheduler.timesteps
__lowerCAmelCase : List[Any] = self.dummy_model()
__lowerCAmelCase : List[Any] = self.dummy_sample_deter
__lowerCAmelCase : Any = torch.manual_seed(0 )
for i, t in enumerate(_SCREAMING_SNAKE_CASE ):
# 1. predict noise residual
__lowerCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if i + 1 == timesteps.shape[0]:
__lowerCAmelCase : Any = None
else:
__lowerCAmelCase : Optional[Any] = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__lowerCAmelCase : Union[str, Any] = scheduler.step(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , prev_timestep=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample
__lowerCAmelCase : List[Any] = pred_prev_sample
__lowerCAmelCase : str = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Union[str, Any] = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1E-2
assert abs(result_mean.item() - 0.336_2038 ) < 1E-3
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
pass
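# The denoising loop these tests exercise, reduced to its skeleton. A minimal
# sketch: `model` below is a random stand-in for a trained epsilon-predicting
# network, so the output is noise, but the control flow matches the test.
import torch
from diffusers import UnCLIPScheduler

scheduler = UnCLIPScheduler(num_train_timesteps=1000)
sample = torch.randn(1, 3, 32, 32)
generator = torch.manual_seed(0)
model = lambda x, t: torch.randn_like(x)  # stand-in for the real model
for t in scheduler.timesteps:
    residual = model(sample, t)  # 1. predict noise residual
    sample = scheduler.step(residual, t, sample, generator=generator).prev_sample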
| 86
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : Union[str, Any] = DiTPipeline
A_ : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
A_ : List[Any] = PipelineTesterMixin.required_optional_params - {
'latents',
'num_images_per_prompt',
'callback',
'callback_steps',
}
A_ : Optional[Any] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
A_ : Tuple = False
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : List[str] = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_SCREAMING_SNAKE_CASE , activation_fn='gelu-approximate' , num_embeds_ada_norm=10_00 , norm_type='ada_norm_zero' , norm_elementwise_affine=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : str = AutoencoderKL()
__lowerCAmelCase : Union[str, Any] = DDIMScheduler()
__lowerCAmelCase : Dict = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ):
if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
__lowerCAmelCase : List[str] = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase : List[str] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = 'cpu'
__lowerCAmelCase : Any = self.get_dummy_components()
__lowerCAmelCase : Union[str, Any] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = pipe(**_SCREAMING_SNAKE_CASE ).images
__lowerCAmelCase : List[str] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
__lowerCAmelCase : Optional[int] = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
__lowerCAmelCase : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 )
def __lowerCamelCase ( self ):
self._test_inference_batch_single_identical(relax_max_difference=_SCREAMING_SNAKE_CASE , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __lowerCamelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = torch.manual_seed(0 )
__lowerCAmelCase : int = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
__lowerCAmelCase : Optional[Any] = ['vase', 'umbrella', 'white shark', 'white wolf']
__lowerCAmelCase : Optional[Any] = pipe.get_label_ids(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type='np' ).images
for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = load_numpy(
f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" )
assert np.abs((expected_image - image).max() ) < 1E-2
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
__lowerCAmelCase : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
__lowerCAmelCase : Dict = ['vase', 'umbrella']
__lowerCAmelCase : List[str] = pipe.get_label_ids(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = torch.manual_seed(0 )
__lowerCAmelCase : Optional[Any] = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type='np' ).images
for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f"/dit/{word}_512.npy" )
assert np.abs((expected_image - image).max() ) < 1E-1
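# The first slow test as a stand-alone snippet (assumes a CUDA device and that
# the facebook/DiT-XL-2-256 checkpoint used above can be downloaded).
import torch
from diffusers import DiTPipeline

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256").to("cuda")
class_ids = pipe.get_label_ids(["white shark"])
images = pipe(class_ids, generator=torch.manual_seed(0), num_inference_steps=40, output_type="np").images
print(images[0].shape)  # one 256x256 RGB sample for the requested class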
| 86
| 1
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ):
A : str = StableDiffusionSAGPipeline
A : Any = TEXT_TO_IMAGE_PARAMS
A : str = TEXT_TO_IMAGE_BATCH_PARAMS
A : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
A : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
A : int = False
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
lowercase : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
lowercase : Tuple = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=SCREAMING_SNAKE_CASE__ , set_alpha_to_one=SCREAMING_SNAKE_CASE__ , )
torch.manual_seed(0 )
lowercase : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase : Optional[int] = CLIPTextModel(SCREAMING_SNAKE_CASE__ )
lowercase : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase : Union[str, Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 ):
if str(SCREAMING_SNAKE_CASE__ ).startswith('''mps''' ):
lowercase : Optional[int] = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
lowercase : Any = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
lowercase : str = {
'''prompt''': '''.''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 1.0,
'''sag_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def __lowerCamelCase ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
lowercase : Optional[int] = StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
lowercase : Optional[Any] = sag_pipe.to(SCREAMING_SNAKE_CASE__ )
sag_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = '''.'''
lowercase : Tuple = torch.manual_seed(0 )
lowercase : Optional[int] = sag_pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
lowercase : Optional[Any] = output.images
lowercase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase : List[Any] = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def __lowerCamelCase ( self ):
lowercase : Any = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
lowercase : int = sag_pipe.to(SCREAMING_SNAKE_CASE__ )
sag_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowercase : int = '''.'''
lowercase : List[str] = torch.manual_seed(0 )
lowercase : List[str] = sag_pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
lowercase : Any = output.images
lowercase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase : List[Any] = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def __lowerCamelCase ( self ):
lowercase : Tuple = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
lowercase : str = sag_pipe.to(SCREAMING_SNAKE_CASE__ )
sag_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowercase : Tuple = '''.'''
lowercase : List[str] = torch.manual_seed(0 )
lowercase : List[str] = sag_pipe(
[prompt] , width=768 , height=512 , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' , )
lowercase : Union[str, Any] = output.images
assert image.shape == (1, 512, 768, 3)
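# The knob these tests exercise is `sag_scale`, the strength of self-attention
# guidance applied on top of the usual classifier-free `guidance_scale`.
# A stand-alone sketch mirroring the first slow test (GPU assumed):
import torch
from diffusers import StableDiffusionSAGPipeline

sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
output = sag_pipe(
    ["."], generator=torch.manual_seed(0), guidance_scale=7.5, sag_scale=1.0,
    num_inference_steps=20, output_type="np",
)
print(output.images.shape)  # (1, 512, 512, 3)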
| 173
|
import os
import pytest
from attr import dataclass
__a = '''us-east-1''' # defaults region
@dataclass
class __SCREAMING_SNAKE_CASE :
A : str
A : str = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
A : Union[str, Any] = {
'task_name': 'mnli',
'per_device_train_batch_size': 16,
'per_device_eval_batch_size': 16,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 500,
'save_steps': 5500,
}
A : str = {**hyperparameters, 'max_steps': 1000}
@property
def __lowerCamelCase ( self ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def __lowerCamelCase ( self ):
return f"""{self.framework}-transfromers-test"""
@property
def __lowerCamelCase ( self ):
return f"""./tests/sagemaker/scripts/{self.framework}"""
@property
def __lowerCamelCase ( self ):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='''class''' )
def __lowercase ( request ) ->str:
"""simple docstring"""
lowercase : Union[str, Any] = SageMakerTestEnvironment(framework=request.cls.framework )
| 173
| 1
|
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_SCREAMING_SNAKE_CASE = """\
"""
_SCREAMING_SNAKE_CASE = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
_SCREAMING_SNAKE_CASE = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE_ ( datasets.Metric ):
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""input_texts""": datasets.Value("""string""" ),
} ) , reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""] , )
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : int = 16 , lowerCamelCase_ : bool = True , lowerCamelCase_ : Any=None ):
"""simple docstring"""
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
if device == "gpu":
UpperCamelCase = """cuda"""
else:
UpperCamelCase = """cuda""" if torch.cuda.is_available() else """cpu"""
UpperCamelCase = AutoModelForCausalLM.from_pretrained(lowerCamelCase_ )
UpperCamelCase = model.to(lowerCamelCase_ )
UpperCamelCase = AutoTokenizer.from_pretrained(lowerCamelCase_ )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
UpperCamelCase = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(lowerCamelCase_ ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
UpperCamelCase = model.config.max_length - 1
else:
UpperCamelCase = model.config.max_length
UpperCamelCase = tokenizer(
lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , return_tensors="""pt""" , return_attention_mask=lowerCamelCase_ , ).to(lowerCamelCase_ )
UpperCamelCase = encodings["""input_ids"""]
UpperCamelCase = encodings["""attention_mask"""]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
UpperCamelCase = []
UpperCamelCase = CrossEntropyLoss(reduction="""none""" )
for start_index in logging.tqdm(range(0 , len(lowerCamelCase_ ) , lowerCamelCase_ ) ):
UpperCamelCase = min(start_index + batch_size , len(lowerCamelCase_ ) )
UpperCamelCase = encoded_texts[start_index:end_index]
UpperCamelCase = attn_masks[start_index:end_index]
if add_start_token:
UpperCamelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(lowerCamelCase_ )
UpperCamelCase = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
UpperCamelCase = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(lowerCamelCase_ ), attn_mask] , dim=1 )
UpperCamelCase = encoded_batch
with torch.no_grad():
UpperCamelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ ).logits
UpperCamelCase = out_logits[..., :-1, :].contiguous()
UpperCamelCase = labels[..., 1:].contiguous()
UpperCamelCase = attn_mask[..., 1:].contiguous()
UpperCamelCase = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , lowerCamelCase_ ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(lowerCamelCase_ )}
| 343
|
def lowerCamelCase_ ( _a : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = [0] * len(_a )
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : Dict = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(_a ) ):
if indegree[i] == 0:
queue.append(_a )
while queue:
UpperCAmelCase_ : List[str] = queue.pop(0 )
cnt += 1
topo.append(_a )
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(_a )
if cnt != len(_a ):
print("""Cycle exists""" )
else:
print(_a )
# Adjacency List of Graph
UpperCamelCase_ = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
lowerCamelCase_(UpperCamelCase_)
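# The same Kahn's-algorithm BFS with conventional names and an O(1)-pop deque,
# for reference (a sketch equivalent to the function above, returning the order
# instead of printing it):
from collections import deque

def kahn_topological_sort(graph):
    """Return a topological order of `graph`, or None if it has a cycle."""
    indegree = {node: 0 for node in graph}
    for neighbors in graph.values():
        for node in neighbors:
            indegree[node] += 1
    queue = deque(node for node, degree in indegree.items() if degree == 0)
    order = []
    while queue:
        node = queue.popleft()
        order.append(node)
        for neighbor in graph[node]:
            indegree[neighbor] -= 1
            if indegree[neighbor] == 0:
                queue.append(neighbor)
    return order if len(order) == len(graph) else None  # None signals a cycle

print(kahn_topological_sort({0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}))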
| 345
| 0
|
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
UpperCAmelCase_ = Lock()
def lowerCAmelCase_ ( __UpperCAmelCase: Optional[int] , __UpperCAmelCase: Any , __UpperCAmelCase: str , __UpperCAmelCase: List[Any] , __UpperCAmelCase: Optional[int] , __UpperCAmelCase: Tuple , __UpperCAmelCase: List[Any] ) -> int:
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(__UpperCAmelCase )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
UpperCamelCase__ : Any = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
UpperCamelCase__ : Dict = min(__UpperCAmelCase , __UpperCAmelCase )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(__UpperCAmelCase )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
UpperCamelCase__ : int = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
UpperCamelCase__ : List[str] = max(__UpperCAmelCase , __UpperCAmelCase )
# after all swaps are performed, send the values back to main
result_pipe[1].send(__UpperCAmelCase )
def lowerCAmelCase_ ( __UpperCAmelCase: int ) -> Optional[int]:
UpperCamelCase__ : Union[str, Any] = []
UpperCamelCase__ : int = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
UpperCamelCase__ : str = Pipe()
UpperCamelCase__ : Optional[Any] = Pipe()
process_array_.append(
Process(
target=__UpperCAmelCase , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
UpperCamelCase__ : Any = temp_rs
UpperCamelCase__ : Union[str, Any] = temp_rr
for i in range(1 , len(__UpperCAmelCase ) - 1 ):
UpperCamelCase__ : Optional[int] = Pipe()
UpperCamelCase__ : List[str] = Pipe()
process_array_.append(
Process(
target=__UpperCAmelCase , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
UpperCamelCase__ : List[Any] = temp_rs
UpperCamelCase__ : Optional[int] = temp_rr
process_array_.append(
Process(
target=__UpperCAmelCase , args=(
len(__UpperCAmelCase ) - 1,
arr[len(__UpperCAmelCase ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(__UpperCAmelCase ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(__UpperCAmelCase ) ):
UpperCamelCase__ : Tuple = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def lowerCAmelCase_ ( ) -> Optional[int]:
UpperCamelCase__ : List[str] = list(range(10 , 0 , -1 ) )
print('''Initial List''' )
print(*__UpperCAmelCase )
UpperCamelCase__ : List[str] = odd_even_transposition(__UpperCAmelCase )
print('''Sorted List\n''' )
print(*__UpperCAmelCase )
if __name__ == "__main__":
main()
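# For contrast, the sequential odd-even transposition sort that the pipeline of
# processes above parallelises: n alternating passes over odd/even adjacent
# pairs (a sketch; same comparisons, no inter-process communication).
def odd_even_sort(arr: list) -> list:
    n = len(arr)
    for pass_index in range(n):
        for i in range(pass_index % 2, n - 1, 2):  # alternate pair parity
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

print(odd_even_sort(list(range(10, 0, -1))))  # [1, 2, ..., 10]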
| 247
|
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self, __magic_name__, __magic_name__=13, __magic_name__=7, __magic_name__=True, __magic_name__=True, __magic_name__=True, __magic_name__=True, __magic_name__=99, __magic_name__=32, __magic_name__=5, __magic_name__=4, __magic_name__=37, __magic_name__="gelu", __magic_name__=0.1, __magic_name__=0.1, __magic_name__=512, __magic_name__=16, __magic_name__=2, __magic_name__=0.02, __magic_name__=4, ) -> str:
"""simple docstring"""
UpperCamelCase__ : Tuple = parent
UpperCamelCase__ : Optional[int] = batch_size
UpperCamelCase__ : Dict = seq_length
UpperCamelCase__ : Tuple = is_training
UpperCamelCase__ : Any = use_attention_mask
UpperCamelCase__ : Tuple = use_token_type_ids
UpperCamelCase__ : Union[str, Any] = use_labels
UpperCamelCase__ : List[Any] = vocab_size
UpperCamelCase__ : str = hidden_size
UpperCamelCase__ : Any = num_hidden_layers
UpperCamelCase__ : List[str] = num_attention_heads
UpperCamelCase__ : Tuple = intermediate_size
UpperCamelCase__ : Union[str, Any] = hidden_act
UpperCamelCase__ : Any = hidden_dropout_prob
UpperCamelCase__ : int = attention_probs_dropout_prob
UpperCamelCase__ : List[str] = max_position_embeddings
UpperCamelCase__ : List[str] = type_vocab_size
UpperCamelCase__ : int = type_sequence_label_size
UpperCamelCase__ : Union[str, Any] = initializer_range
UpperCamelCase__ : List[str] = num_choices
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
UpperCamelCase__ : Tuple = None
if self.use_attention_mask:
UpperCamelCase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ : int = None
if self.use_token_type_ids:
UpperCamelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
UpperCamelCase__ : List[Any] = BertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=__magic_name__, initializer_range=self.initializer_range, )
return config, input_ids, token_type_ids, attention_mask
def UpperCamelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : int = self.prepare_config_and_inputs()
UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Optional[int] = config_and_inputs
UpperCamelCase__ : List[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase__ : str = self.prepare_config_and_inputs()
UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Optional[Any] = config_and_inputs
UpperCamelCase__ : Optional[int] = True
UpperCamelCase__ : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class lowercase__ ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
a : int = True
a : List[str] = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
)
if is_flax_available()
else ()
)
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : List[Any] = FlaxBertModelTester(self )
@slow
def UpperCamelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
# Only check this for base model, not necessary for all model classes.
# This will also help speed-up tests.
UpperCamelCase__ : Dict = FlaxBertModel.from_pretrained('''bert-base-cased''' )
UpperCamelCase__ : Any = model(np.ones((1, 1) ) )
self.assertIsNotNone(__magic_name__ )
| 247
| 1
|
import itertools
import os
import re
UpperCamelCase = re.compile(R'''([A-Z]+)([A-Z][a-z])''')
UpperCamelCase = re.compile(R'''([a-z\d])([A-Z])''')
UpperCamelCase = re.compile(R'''(?<!_)_(?!_)''')
UpperCamelCase = re.compile(R'''(_{2,})''')
UpperCamelCase = R"^\w+(\.\w+)*$"
UpperCamelCase = R"<>:/\|?*"
def lowercase_ ( _lowerCamelCase : str):
lowercase__ : List[Any] = _uppercase_uppercase_re.sub(R"\1_\2" , lowerCAmelCase__)
lowercase__ : str = _lowercase_uppercase_re.sub(R"\1_\2" , lowerCAmelCase__)
return name.lower()
def lowercase_ ( _lowerCamelCase : int):
lowercase__ : Union[str, Any] = _single_underscore_re.split(lowerCAmelCase__)
lowercase__ : Tuple = [_multiple_underscores_re.split(lowerCAmelCase__) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(lowerCAmelCase__) if n != "")
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
if os.path.basename(lowerCAmelCase__) != name:
raise ValueError(f'''Should be a dataset name, not a path: {name}''')
return camelcase_to_snakecase(lowerCAmelCase__)
def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple):
if os.path.basename(lowerCAmelCase__) != name:
raise ValueError(f'''Should be a dataset name, not a path: {name}''')
if not re.match(_split_re , lowerCAmelCase__):
raise ValueError(f'''Split name should match \'{_split_re}\' but got \'{split}\'.''')
return f'''{filename_prefix_for_name(lowerCAmelCase__)}-{split}'''
def lowercase_ ( _lowerCamelCase : Any , _lowerCamelCase : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : str=None):
lowercase__ : int = filename_prefix_for_split(lowerCAmelCase__ , lowerCAmelCase__)
if filetype_suffix:
prefix += f'''.{filetype_suffix}'''
lowercase__ : str = os.path.join(lowerCAmelCase__ , lowerCAmelCase__)
return f'''{filepath}*'''
def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : List[Any] , _lowerCamelCase : Any=None , _lowerCamelCase : List[str]=None):
lowercase__ : Optional[Any] = filename_prefix_for_split(lowerCAmelCase__ , lowerCAmelCase__)
lowercase__ : Optional[Any] = os.path.join(lowerCAmelCase__ , lowerCAmelCase__)
if shard_lengths:
lowercase__ : Any = len(lowerCAmelCase__)
lowercase__ : List[Any] = [f'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(lowerCAmelCase__)]
if filetype_suffix:
lowercase__ : Union[str, Any] = [filename + f'''.{filetype_suffix}''' for filename in filenames]
return filenames
else:
lowercase__ : Optional[Any] = prefix
if filetype_suffix:
filename += f'''.{filetype_suffix}'''
return [filename]
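# Worked examples for the helpers above, derived by hand from the two regexes
# (shown as comments because the functions all share one obfuscated name here):
#
#   camelcase_to_snakecase("HellaSwag")  -> "hella_swag"
#   camelcase_to_snakecase("SQuAD")      -> "s_qu_ad"
#
#   shard filenames for dataset "HellaSwag", split "train", suffix "parquet",
#   and a shard_lengths list of length 2:
#     hella_swag-train-00000-of-00002.parquet
#     hella_swag-train-00001-of-00002.parquet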
| 87
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
a_ : Union[str, Any] = {
"configuration_layoutlmv3": [
"LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
"LayoutLMv3Config",
"LayoutLMv3OnnxConfig",
],
"processing_layoutlmv3": ["LayoutLMv3Processor"],
"tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Tuple = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : str = [
"LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
"LayoutLMv3ForQuestionAnswering",
"LayoutLMv3ForSequenceClassification",
"LayoutLMv3ForTokenClassification",
"LayoutLMv3Model",
"LayoutLMv3PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Any = [
"TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLayoutLMv3ForQuestionAnswering",
"TFLayoutLMv3ForSequenceClassification",
"TFLayoutLMv3ForTokenClassification",
"TFLayoutLMv3Model",
"TFLayoutLMv3PreTrainedModel",
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Any = ["LayoutLMv3FeatureExtractor"]
a_ : List[str] = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
a_ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
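# The lazy-import pattern above in miniature: attribute access triggers the real
# import at runtime, while static type checkers see the eager imports under
# TYPE_CHECKING. A minimal sketch with stdlib importlib (not the actual
# transformers _LazyModule implementation):
import importlib

class MiniLazyModule:
    def __init__(self, name_to_module: dict):
        self._name_to_module = name_to_module  # attribute name -> module path

    def __getattr__(self, name: str):
        module = importlib.import_module(self._name_to_module[name])
        return getattr(module, name)

lazy = MiniLazyModule({"dataclass": "dataclasses"})
print(lazy.dataclass)  # imports `dataclasses` only on first access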
| 168
| 0
|
def _lowerCAmelCase ( __lowerCAmelCase ) -> int:
"""simple docstring"""
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
snake_case__ : List[str] = 1
snake_case__ : int = 1
while repunit:
snake_case__ : Dict = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def _lowerCAmelCase ( __lowerCAmelCase = 1000000 ) -> int:
"""simple docstring"""
snake_case__ : str = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(__lowerCAmelCase ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(f"""{solution() = }""")
| 44
|
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class a :
def __init__( self :str ,__lowercase :Optional[Any] ,__lowercase :List[Any]=1_3 ,__lowercase :str=7 ,__lowercase :Dict=True ,__lowercase :Any=True ,__lowercase :str=True ,__lowercase :Any=True ,__lowercase :Tuple=9_9 ,__lowercase :List[str]=3_2 ,__lowercase :int=5 ,__lowercase :Union[str, Any]=4 ,__lowercase :List[str]=4 ,__lowercase :Any="gelu" ,__lowercase :Any=0.0 ,__lowercase :Tuple=0.1 ,__lowercase :str=True ,__lowercase :Tuple=5_1_2 ,__lowercase :Dict=1_6 ,__lowercase :Tuple=2 ,__lowercase :List[str]=0.02 ,__lowercase :Dict=3 ,__lowercase :Optional[int]=4 ,__lowercase :Tuple=None ,):
snake_case__ : Optional[int] = parent
snake_case__ : Optional[Any] = batch_size
snake_case__ : Optional[Any] = seq_length
snake_case__ : Tuple = is_training
snake_case__ : Optional[Any] = use_input_mask
snake_case__ : List[Any] = use_token_type_ids
snake_case__ : str = use_labels
snake_case__ : List[Any] = vocab_size
snake_case__ : Optional[int] = hidden_size
snake_case__ : List[Any] = num_hidden_layers
snake_case__ : str = num_attention_heads
snake_case__ : int = intermediate_multiple_size
snake_case__ : Tuple = hidden_act
snake_case__ : Optional[Any] = hidden_dropout
snake_case__ : str = attention_dropout
snake_case__ : List[str] = weight_tying
snake_case__ : Optional[Any] = max_position_embeddings
snake_case__ : Optional[int] = type_vocab_size
snake_case__ : str = type_sequence_label_size
snake_case__ : Dict = initializer_range
snake_case__ : int = num_labels
snake_case__ : int = num_choices
snake_case__ : int = scope
def __lowerCamelCase ( self :List[str] ):
snake_case__ : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
snake_case__ : str = None
if self.use_input_mask:
snake_case__ : int = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : Union[str, Any] = None
if self.use_labels:
snake_case__ : int = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
snake_case__ : Optional[Any] = self.get_config()
return config, input_ids, input_mask, token_labels
def __lowerCamelCase ( self :int ):
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_multiple_size=self.intermediate_multiple_size ,hidden_act=self.hidden_act ,hidden_dropout=self.hidden_dropout ,attention_dropout=self.attention_dropout ,weight_tying=self.weight_tying ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__lowercase ,initializer_range=self.initializer_range ,)
def __lowerCamelCase ( self :str ):
snake_case__ , snake_case__ , snake_case__ , snake_case__ : str = self.prepare_config_and_inputs()
snake_case__ : Union[str, Any] = True
return config, input_ids, input_mask, token_labels
def __lowerCamelCase ( self :List[Any] ,__lowercase :Any ,__lowercase :Dict ,__lowercase :Optional[Any] ):
snake_case__ : Union[str, Any] = GPTNeoXJapaneseModel(config=__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : Union[str, Any] = model(__lowercase ,attention_mask=__lowercase )
snake_case__ : Optional[Any] = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self :Any ,__lowercase :Union[str, Any] ,__lowercase :Tuple ,__lowercase :Union[str, Any] ):
snake_case__ : Any = True
snake_case__ : Tuple = GPTNeoXJapaneseModel(__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : str = model(__lowercase ,attention_mask=__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self :Any ,__lowercase :List[Any] ,__lowercase :List[Any] ,__lowercase :Optional[Any] ,__lowercase :Any ):
snake_case__ : Any = GPTNeoXJapaneseForCausalLM(config=__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : Any = model(__lowercase ,attention_mask=__lowercase ,labels=__lowercase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self :Optional[int] ,__lowercase :Any ,__lowercase :int ,__lowercase :List[str] ):
snake_case__ : Optional[int] = True
snake_case__ : Optional[int] = GPTNeoXJapaneseForCausalLM(config=__lowercase )
model.to(__lowercase )
model.eval()
# first forward pass
snake_case__ : List[Any] = model(__lowercase ,attention_mask=__lowercase ,use_cache=__lowercase )
snake_case__ : Dict = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
snake_case__ : Optional[Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size )
snake_case__ : int = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
snake_case__ : Optional[int] = torch.cat([input_ids, next_tokens] ,dim=-1 )
snake_case__ : Optional[int] = torch.cat([input_mask, next_mask] ,dim=-1 )
snake_case__ : Dict = model(__lowercase ,attention_mask=__lowercase ,output_hidden_states=__lowercase )
snake_case__ : Tuple = output_from_no_past['''hidden_states'''][0]
snake_case__ : List[str] = model(
__lowercase ,attention_mask=__lowercase ,past_key_values=__lowercase ,output_hidden_states=__lowercase ,)['''hidden_states'''][0]
# select random slice
snake_case__ : Any = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
snake_case__ : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case__ : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowercase ,__lowercase ,atol=1e-3 ) )
def __lowerCamelCase ( self :Dict ):
snake_case__ : List[Any] = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[Any] = config_and_inputs
snake_case__ : List[str] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
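

# A standalone sketch of the cached-decoding equivalence checked by
# create_and_check_decoder_model_past_large_inputs above: last-token logits must match
# whether or not past_key_values are reused. The tiny config below is an illustrative
# assumption, not the tester's configuration.
if __name__ == "__main__":
    import torch
    from transformers import GPTNeoXJapaneseConfig, GPTNeoXJapaneseForCausalLM

    cfg = GPTNeoXJapaneseConfig(vocab_size=64, hidden_size=32, num_hidden_layers=2, num_attention_heads=4)
    lm = GPTNeoXJapaneseForCausalLM(cfg).eval()
    ids = torch.randint(0, 64, (1, 8))
    with torch.no_grad():
        past = lm(ids[:, :-1], use_cache=True).past_key_values
        full = lm(ids, use_cache=True)
        incremental = lm(ids[:, -1:], past_key_values=past)
    assert torch.allclose(full.logits[:, -1], incremental.logits[:, -1], atol=1e-3)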
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
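

# A minimal usage sketch (my addition, not part of the original module): the config
# behaves like any PretrainedConfig and can be instantiated with overrides.
if __name__ == "__main__":
    config = EfficientFormerConfig(image_size=192)
    print(config.model_type, config.image_size, config.hidden_sizes)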
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
    T5Config,
    T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image() -> Image.Image:
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.embeddings.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.embeddings.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blipa_config(model_name):
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32001).to_dict()
    else:
        raise ValueError("Model name not supported")

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", truncation_side="left")
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})

    if "t5" in model_name:
        tokenizer = T5TokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left")
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b", truncation_side="left", bos_token="</s>", unk_token="</s>"
        )
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})

    config, image_size = get_blipa_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()

    model_name_to_original = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }
    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    lavis_device = "cuda:1" if torch.cuda.is_available() else "cpu"
    hf_model_device = "cuda:2" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=lavis_device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "llm_proj" in key:
            key = key.replace("llm_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("llm_model"):
            key = key.replace("llm_model", "language_model")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)

    image = load_demo_image()
    prompt = "What is unusual about this image?"

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = InstructBlipProcessor(
        image_processor=image_processor,
        tokenizer=tokenizer,
        qformer_tokenizer=qformer_tokenizer,
    )
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(hf_model_device)

    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)

    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}
            ).logits
            label_input_ids = tokenizer("\n", return_tensors="pt").input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits

    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if "vicuna" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print("Looks ok!")

    print("Generating with original model...")
    original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)

    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model...")
    outputs = hf_model.generate(
        **inputs,
        do_sample=False,
        num_beams=5,
        max_length=256,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.5,
        length_penalty=1.0,
        temperature=1,
    )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print("Original generation:", original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"Salesforce/{model_name}")
        hf_model.push_to_hub(f"Salesforce/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"""instructblip-vicuna-7b""",
"""instructblip-vicuna-13b""",
"""instructblip-flan-t5-xl""",
"""instructblip-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""instructblip-flan-t5-xl""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
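
# Example invocation (illustrative; the script filename and output path are assumptions):
#   python convert_instructblip_original_to_pytorch.py \
#       --model_name instructblip-flan-t5-xl \
#       --pytorch_dump_folder_path ./instructblip-flan-t5-xl \
#       --push_to_hub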
def split(string: str, separator: str = " ") -> list:
    """
    Will split the string up into all the values separated by the separator.

    >>> split("apple#banana#cherry#orange", separator="#")
    ['apple', 'banana', 'cherry', 'orange']
    >>> split("Hello there")
    ['Hello', 'there']
    """
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
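
    # A couple of illustrative calls (my addition; outputs checked by hand):
    print(split("apple#banana#cherry#orange", separator="#"))  # ['apple', 'banana', 'cherry', 'orange']
    print(split("Hello there"))  # ['Hello', 'there']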
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
    args = parser.parse_args()
if args.model_type == "roberta":
lowercase_ = RobertaForMaskedLM.from_pretrained(args.model_name)
lowercase_ = """roberta"""
elif args.model_type == "gpt2":
lowercase_ = GPTaLMHeadModel.from_pretrained(args.model_name)
lowercase_ = """transformer"""
lowercase_ = model.state_dict()
lowercase_ = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
lowercase_ = state_dict[F'{prefix}.{param_name}']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
lowercase_ = F'{prefix}.embeddings.{w}.weight'
lowercase_ = state_dict[param_name]
for w in ["weight", "bias"]:
lowercase_ = F'{prefix}.embeddings.LayerNorm.{w}'
lowercase_ = state_dict[param_name]
# Transformer Blocks #
lowercase_ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
lowercase_ = state_dict[
F'{prefix}.h.{teacher_idx}.{layer}.{w}'
]
lowercase_ = state_dict[F'{prefix}.h.{teacher_idx}.attn.bias']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
lowercase_ = state_dict[
F'{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'
]
std_idx += 1
# Language Modeling Head ###s
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
lowercase_ = state_dict[F'{layer}']
if args.vocab_transform:
for w in ["weight", "bias"]:
lowercase_ = state_dict[F'lm_head.dense.{w}']
lowercase_ = state_dict[F'lm_head.layer_norm.{w}']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
lowercase_ = state_dict[F'{prefix}.ln_f.{w}']
lowercase_ = state_dict["""lm_head.weight"""]
print(F'N layers selected for distillation: {std_idx}')
print(F'Number of params transferred for distillation: {len(compressed_sd.keys())}')
print(F'Save transferred checkpoint to {args.dump_checkpoint}.')
torch.save(compressed_sd, args.dump_checkpoint)
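
# Example invocation (illustrative; the script filename is an assumption):
#   python extract.py --model_type roberta --model_name roberta-large \
#       --dump_checkpoint serialization_dir/tf_roberta_048131723.pth --vocab_transform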
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: Tuple[str] = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ) -> None:
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
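

# A minimal forward-pass sketch (my addition). extra_in_channels=16 matches the 16
# Fourier time-embedding channels that DownBlock1DNoSkip concatenates onto the input;
# the sample length is an illustrative assumption.
if __name__ == "__main__":
    unet = UNet1DModel(sample_size=256, in_channels=2, out_channels=2, extra_in_channels=16)
    noisy = torch.randn(1, 2, 256)
    out = unet(noisy, timestep=10).sample
    print(out.shape)  # expected: torch.Size([1, 2, 256])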
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--deit_name""",
default="""vit_deit_base_distilled_patch16_224""",
type=str,
help="""Name of the DeiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
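
# Example invocation (illustrative; the script filename is an assumption):
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224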
"""simple docstring"""
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable) -> Callable:
    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
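

# A minimal usage sketch (my addition): a wrapped function keeps its behavior but
# emits a UserWarning on every call.
if __name__ == "__main__":
    @experimental
    def add(a: int, b: int) -> int:
        return a + b

    print(add(1, 2))  # warns that 'add' is experimental, then prints 3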
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(self, feature_size=80, sampling_rate=16000, hop_length=160, chunk_length=30, n_fft=400, padding_value=0.0, return_attention_mask=False, **kwargs):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=8000.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney",
        )
    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters, log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"], attention_mask=padded_inputs["attention_mask"], padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
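

# A minimal usage sketch (my addition, not part of the original module): extract
# log-mel features from one second of synthetic audio at 16 kHz.
if __name__ == "__main__":
    fe = WhisperFeatureExtractor()
    audio = np.zeros(16000, dtype=np.float32)  # 1 s of silence
    feats = fe(audio, sampling_rate=16000, return_tensors="np").input_features
    print(feats.shape)  # expected (1, 80, 3000): 80 mel bins over the padded 30 s window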
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit_extra_args(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def f(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            f(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
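

# A minimal sketch of the pattern exercised above (my addition): on a real CUDA OOM,
# the decorator halves the batch size and retries; here the function succeeds at once.
if __name__ == "__main__":
    @find_executable_batch_size(starting_batch_size=64)
    def train(batch_size):
        # A hypothetical training entry point; a real one would build dataloaders here.
        print(f"trying batch_size={batch_size}")
        return batch_size

    print("settled on", train())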
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = num_stages

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@unittest.skip(reason="UperNet does not use inputs_embeds" )
def __magic_name__ ( self : Any ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not support input and output embeddings" )
def __magic_name__ ( self : Dict ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not have a base model" )
def __magic_name__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not have a base model" )
def __magic_name__ ( self : Tuple ) -> Dict:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def __magic_name__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __magic_name__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
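

# A minimal end-to-end sketch of the integration flow above (my addition; reuses the
# convnext-tiny checkpoint and prepare_img() from this file):
if __name__ == "__main__":
    processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
    model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
    inputs = processor(images=prepare_img(), return_tensors="pt")
    with torch.no_grad():
        seg_map = model(**inputs).logits.argmax(dim=1)  # per-pixel ADE20K class ids
    print(seg_map.shape)  # expected: torch.Size([1, 512, 512])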
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""test""",
"""question""",
"""this""",
"""is""",
"""the""",
"""first""",
"""second""",
"""third""",
"""fourth""",
"""fifth""",
"""record""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)
    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset
    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever
def lowerCAmelCase ( self : Dict ) ->Dict:
"""simple docstring"""
snake_case_ = self.get_config()
snake_case_ = self.get_dummy_retriever()
snake_case_ = retriever.tokenizer
snake_case_ = np.array([0, 3] , dtype="""long""" )
snake_case_ = tokenizer(["""Test question"""] ).input_ids
snake_case_ = tokenizer(
["""the fourth"""] , add_special_tokens=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , ).input_ids
snake_case_ = config.reader_seq_len
snake_case_ , snake_case_ , snake_case_ , snake_case_ = retriever(
UpperCAmelCase_ , UpperCAmelCase_ , answer_ids=UpperCAmelCase_ , max_length=UpperCAmelCase_ , return_tensors="""np""" )
self.assertEqual(len(UpperCAmelCase_ ) , 2 )
self.assertEqual(len(UpperCAmelCase_ ) , 2 )
self.assertEqual(len(UpperCAmelCase_ ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """first""", """record""", """[SEP]"""] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """fourth""", """record""", """[SEP]"""] , )
    def test_block_has_answer( self ):
        """simple docstring"""
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3, 5] , dtype="long" )
        question_input_ids = tokenizer(["Test question"] ).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"] , add_special_tokens=False , return_token_type_ids=False , return_attention_mask=False , ).input_ids
        max_length = config.reader_seq_len
        has_answers , start_pos , end_pos , concat_inputs = retriever(
            retrieved_block_ids , question_input_ids , answer_ids=answer_ids , max_length=max_length , return_tensors="np" )
        self.assertEqual([False, True, True] , has_answers )
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , start_pos )
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , end_pos )

    def test_save_load_pretrained( self ):
        """simple docstring"""
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
        self.assertEqual(retriever.block_records[0] , b"This is the first record" )
        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" )
            self.assertEqual(retriever.block_records[0] , b"This is the first record" )
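As a stand-alone illustration of the block-records format the retriever consumes, which is a 1-D object array of UTF-8 byte strings with one evidence block per entry, here is a sketch assuming nothing beyond NumPy:

import numpy as np

block_records = np.array(
    [b"This is the first record", b"This is the second record"], dtype=object)
print(block_records[0].decode())  # This is the first record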
| 353
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments:
'''simple docstring'''
__lowercase: Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be trained."""})
__lowercase: Optional[str] = field(
default="""./""" , metadata={"""help""": """Save dir where model repo is cloned and models updates are saved to."""})
__lowercase: Optional[str] = field(
default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path of training dataset."""})
__lowercase: Optional[str] = field(
default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""})
__lowercase: Optional[int] = field(default=2 , metadata={"""help""": """Batch size for training."""})
__lowercase: Optional[int] = field(default=2 , metadata={"""help""": """Batch size for evaluation."""})
__lowercase: Optional[float] = field(default=0.1 , metadata={"""help""": """Value of weight decay."""})
__lowercase: Optional[int] = field(
default=1_00_00 , metadata={"""help""": """Size of buffer used to shuffle streaming dataset."""})
    __lowercase: Optional[float] = field(default=2E-4 , metadata={"""help""": """Learning rate for training."""})
__lowercase: Optional[str] = field(default="""cosine""" , metadata={"""help""": """Learning rate."""})
__lowercase: Optional[int] = field(
default=7_50 , metadata={"""help""": """Number of warmup steps in the learning rate schedule."""})
__lowercase: Optional[int] = field(
default=16 , metadata={"""help""": """Number of gradient accumulation steps."""})
__lowercase: Optional[bool] = field(
default=snake_case__ , metadata={"""help""": """Use gradient checkpointing to reduce memory footprint."""})
__lowercase: Optional[int] = field(default=5_00_00 , metadata={"""help""": """Maximum number of training steps."""})
__lowercase: Optional[int] = field(
default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""})
__lowercase: Optional[int] = field(default=10_24 , metadata={"""help""": """Sequence lengths used for training."""})
__lowercase: Optional[int] = field(default=1 , metadata={"""help""": """Training seed."""})
__lowercase: Optional[int] = field(
default=10_24 , metadata={"""help""": """Interval to save checkpoints. Measured as number of forward passes not training steps."""} , )
__lowercase: Optional[str] = field(
default=snake_case__ , metadata={"""help""": """States path if the training should continue from a checkpoint folder."""})
__lowercase: Optional[bool] = field(default=snake_case__ , metadata={"""help""": """If True the data is pretokenized."""})
@dataclass
class EvaluationArguments:
'''simple docstring'''
__lowercase: Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""})
__lowercase: Optional[str] = field(
default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""})
__lowercase: Optional[int] = field(default=2 , metadata={"""help""": """Batch size used for evaluation."""})
__lowercase: Optional[int] = field(
default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""})
__lowercase: Optional[int] = field(default=10_24 , metadata={"""help""": """Length of sequences to be evaluated."""})
__lowercase: Optional[int] = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""})
@dataclass
class HumanEvalArguments:
'''simple docstring'''
__lowercase: Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""})
__lowercase: Optional[int] = field(default=snake_case__ , metadata={"""help""": """Number of workers used for code evaluation."""})
__lowercase: Optional[int] = field(
default=snake_case__ , metadata={"""help""": """The number of human-eval tasks to run. If not included all tasks are evaluated."""} , )
__lowercase: Optional[bool] = field(
default=snake_case__ , metadata={"""help""": """Sample from the language model's output distribution."""})
__lowercase: Optional[float] = field(default=0.2 , metadata={"""help""": """Sampling temperature used for generation."""})
__lowercase: Optional[int] = field(default=2_56 , metadata={"""help""": """Maximum number of newly generated tokens."""})
__lowercase: Optional[int] = field(default=0 , metadata={"""help""": """Top-k parameter used for generation."""})
__lowercase: Optional[float] = field(default=0.9_5 , metadata={"""help""": """Top-p parameter used for nucleus sampling."""})
__lowercase: Optional[int] = field(default=10 , metadata={"""help""": """Number of generations to run in parallel."""})
__lowercase: Optional[int] = field(
default=2_00 , metadata={"""help""": """Number of completions to generate for each sample."""})
__lowercase: Optional[int] = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""})
__lowercase: Optional[str] = field(
default="""eval_results.json""" , metadata={"""help""": """Random seed used for evaluation."""})
__lowercase: Optional[str] = field(
default="""0""" , metadata={"""help""": """Allow `code_eval` to execute Python code on machine"""})
__lowercase: Optional[int] = field(
default=-1 , metadata={
"""help""": (
"""Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"""
""" number corresponds to which GPU device id to run on."""
)
} , )
@dataclass
class PreprocessingArguments:
'''simple docstring'''
__lowercase: Optional[int] = field(
default=snake_case__ , metadata={
"""help""": """The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."""
} , )
__lowercase: Optional[str] = field(
default="""transformersbook/codeparrot""" , metadata={"""help""": """Folder or name of dataset to process."""})
__lowercase: Optional[str] = field(
default="""codeparrot-clean""" , metadata={"""help""": """Folder to save processed processed dataset."""})
__lowercase: Optional[int] = field(
default=10_00_00 , metadata={"""help""": """Number of files to save per JSON output file."""})
__lowercase: Optional[str] = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""})
__lowercase: Optional[float] = field(
default=10_00 , metadata={"""help""": """Maximum line length in file, otherwise file is filtered."""})
__lowercase: Optional[float] = field(
default=1_00 , metadata={"""help""": """Maximum mean line length in file, otherwise file is filtered."""})
__lowercase: Optional[float] = field(
default=0.2_5 , metadata={"""help""": """Maximum fraction of non-alphanumeric characters, otherwise file is filtered."""})
__lowercase: Optional[float] = field(
default=1.5 , metadata={"""help""": """Minimum character token ratio for the file, otherwise file is filtered."""})
__lowercase: Optional[float] = field(
default=0.7 , metadata={"""help""": """Probability for filtering config, test and uncommon files."""})
__lowercase: Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""} , )
__lowercase: Optional[bool] = field(
default=snake_case__ , metadata={"""help""": """If True, near-duplicate samples are removed."""})
__lowercase: Optional[float] = field(
default=0.8_5 , metadata={"""help""": """Jaccard threshold for near-duplicate samples."""})
@dataclass
class TokenizerTrainingArguments:
'''simple docstring'''
__lowercase: Optional[str] = field(
default="""gpt2""" , metadata={"""help""": """Base tokenizer to build new tokenizer from."""})
__lowercase: Optional[str] = field(
default="""transformersbook/codeparrot-train""" , metadata={"""help""": """Dataset to train tokenizer on."""})
__lowercase: Optional[str] = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""})
__lowercase: Optional[int] = field(default=20_00_00 , metadata={"""help""": """Number of examples to train tokenizer on."""})
__lowercase: Optional[int] = field(
        default=3_27_68 , metadata={"""help""": """Vocabulary size of the new tokenizer."""})
__lowercase: Optional[str] = field(default="""codeparrot""" , metadata={"""help""": """Name of new tokenizer."""})
__lowercase: Optional[bool] = field(default=snake_case__ , metadata={"""help""": """Push saved tokenizer to the hub."""})
@dataclass
class PretokenizationArguments:
'''simple docstring'''
__lowercase: Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""})
__lowercase: Optional[str] = field(
default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path to the dataset to pretokenize."""})
__lowercase: Optional[str] = field(
default="""tokenized-codeparrot-train""" , metadata={"""help""": """Repo name of the pretokenized data."""})
__lowercase: Optional[int] = field(default=snake_case__ , metadata={"""help""": """Number of workers used for code evaluation."""})
@dataclass
class InitializationArguments:
'''simple docstring'''
__lowercase: Optional[str] = field(
default="""gpt2-large""" , metadata={"""help""": """Configuration to use for model initialization."""})
__lowercase: Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Tokenizer attached to model."""})
__lowercase: Optional[str] = field(default="""codeparrot""" , metadata={"""help""": """Name of the created model."""})
__lowercase: Optional[bool] = field(default=snake_case__ , metadata={"""help""": """Push saved tokenizer to the hub."""})
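Argument classes like these are normally parsed from the command line with transformers' HfArgumentParser. A minimal self-contained sketch, using a hypothetical two-field dataclass (the field names below are illustrative, not the originals):

from dataclasses import dataclass, field
from typing import Optional

from transformers import HfArgumentParser


@dataclass
class DemoArguments:  # hypothetical stand-in for the argument classes above
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."})
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})


args = HfArgumentParser(DemoArguments).parse_args_into_dataclasses()[0]
print(args.model_ckpt, args.train_batch_size)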
| 233
| 0
|
"""simple docstring"""
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
    input_string = 'aab'
    pattern = 'c*a*b'
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F"""{input_string} matches the given pattern {pattern}""")
else:
print(F"""{input_string} does not match with the given pattern {pattern}""")
| 44
|
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`list` of `int`): Predicted class labels, as returned by a model.\n    references (`list` of `int`): Ground truth labels.\n    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n    Example 1-A simple example using only predictions and references.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n\n    Example 2-The same as Example 1, but that also returns the `p-value`.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n        >>> print(sorted(list(results.keys())))\n        [\'p-value\', \'pearsonr\']\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n        >>> print(round(results[\'p-value\'], 2))\n        0.15\n'
_CITATION = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n          Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n          Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n          Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n          Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n          Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n          Kern, Robert and Larson, Eric and Carey, C J and\n          Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n          {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n          Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n          Harris, Charles R. and Archibald, Anne M. and\n          Ribeiro, Antonio H. and Pedregosa, Fabian and\n          {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n          Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Pearsonr( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
    def _compute( self , predictions , references , return_pvalue=False ):
        if return_pvalue:
            results = pearsonr(references , predictions )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references , predictions )[0] )}
| 44
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
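The same lazy-import idea can be sketched stand-alone with PEP 562's module-level __getattr__; the real _LazyModule adds caching, dir() support, and richer error handling on top of this:

import importlib
import sys

_import_structure = {"math": ["sqrt"]}  # submodule -> public names, as above


def __getattr__(name):  # PEP 562: called when the module attribute is missing
    for module_name, names in _import_structure.items():
        if name in names:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")


# attribute access on the module object triggers the hook:
print(sys.modules[__name__].sqrt(16.0))  # 4.0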
| 351
|
__UpperCAmelCase = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
__UpperCAmelCase = [{"type": "code", "content": INSTALL_CONTENT}]
__UpperCAmelCase = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 257
| 0
|
"""simple docstring"""
from __future__ import annotations
import os
from typing import Any
import requests
lowercase__ : Optional[int] = """https://api.github.com"""
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
lowercase__ : Optional[Any] = BASE_URL + """/user"""
# https://github.com/settings/tokens
lowercase__ : Dict = os.environ.get("""USER_TOKEN""", """""")
def UpperCamelCase_ ( lowerCAmelCase__ : str ) -> dict[Any, Any]:
"""simple docstring"""
lowerCAmelCase_ : Union[str, Any] = {
'Authorization': f"token {auth_token}",
'Accept': 'application/vnd.github.v3+json',
}
return requests.get(lowerCAmelCase__ , headers=lowerCAmelCase__ ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'{key}: {value}')
else:
raise ValueError("""'USER_TOKEN' field cannot be empty.""")
| 224
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class Summarization( TaskTemplate ):
    """simple docstring"""
    task: str = field(default="summarization" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema: ClassVar[Features] = Features({"text": Value("string" )} )
    label_schema: ClassVar[Features] = Features({"summary": Value("string" )} )
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping( self ) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
| 224
| 1
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests( unittest.TestCase ):
    @property
    def dummy_uncond_unet( self ):
        torch.manual_seed(0 )
        model = UNet2DModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        return model
    def test_inference( self ):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pipe(num_inference_steps=2 , generator=generator , output_type='numpy' ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = pipe(num_inference_steps=2 , generator=generator , output_type='numpy' , return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class KarrasVePipelineIntegrationTests( unittest.TestCase ):
    def test_karras_ve_pipeline( self ):
        model_id = 'google/ncsnpp-celebahq-256'
        model = UNet2DModel.from_pretrained(model_id )
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model , scheduler=scheduler )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pipe(num_inference_steps=20 , generator=generator , output_type='numpy' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 363
|
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    '''simple docstring'''
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday))


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = 'will be' if year > datetime.now().year else 'was'
        print(f'Easter in {year} {tense} {gauss_easter(year)}')
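Two quick checks of the function above against well-known Easter dates; both were traced by hand through the float arithmetic used here:

from datetime import datetime

assert gauss_easter(2000) == datetime(2000, 4, 23)
assert gauss_easter(2023) == datetime(2023, 4, 9)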
| 304
| 0
|
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """simple docstring"""
    if density <= 0:
        raise ValueError("Impossible fluid density" )
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus" )
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
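A rough worked example: for water at about 20 degrees C (bulk modulus roughly 2.2 GPa, density roughly 998 kg/m^3), v = sqrt(K / rho) should come out near the measured value of about 1480 m/s:

v = speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.2e9)
print(f"{v:.0f} m/s")  # ~1485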
| 15
|
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest( TestCase ):
    '''simple docstring'''
    def _create_example_records( self ):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict( self ):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data )

    def test_create( self ):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records )
        self.assertListEqual(dset.column_names , ["col_1", "col_2"] )
        for i, r in enumerate(dset ):
            self.assertDictEqual(r , example_records[i] )

    def test_list_dict_equivalent( self ):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records )
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
        self.assertEqual(dset.info , dset_from_dict.info )

    def test_uneven_records( self ):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records )
        self.assertDictEqual(dset[0] , {"col_1": 1} )
        self.assertDictEqual(dset[1] , {"col_1": None} )  # NB: first record is used for columns

    def test_variable_list_records( self ):  # checks if the type can be inferred from the second record
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records )
        self.assertEqual(dset.info.features["col_1"] , Sequence(Value("int64" ) ) )

    def test_create_empty( self ):
        dset = Dataset.from_list([] )
        self.assertEqual(len(dset ) , 0 )
        self.assertListEqual(dset.column_names , [] )
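The core behavior under test, in two lines:

from datasets import Dataset

dset = Dataset.from_list([{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}])
print(dset.column_names, dset[0])  # ['col_1', 'col_2'] {'col_1': 3, 'col_2': 'a'}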
| 58
| 0
|
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline( DiffusionPipeline ):
    def __init__( self , unet , scheduler ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , batch_size : int = 1 , num_inference_steps : int = 100 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , audio_length_in_s : Optional[float] = None , return_dict : bool = True , )->Union[AudioPipelineOutput, Tuple]:
        '''simple docstring'''
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        sample_size = audio_length_in_s * self.unet.config.sample_rate
        down_scale_factor = 2 ** len(self.unet.up_blocks )
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f'{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'
                f' {3 * down_scale_factor / self.unet.config.sample_rate}.' )
        original_sample_size = int(sample_size )
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f'{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'
                f' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'
                ' process.' )
        sample_size = int(sample_size )
        dtype = next(iter(self.unet.parameters() ) ).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f'You have passed a list of generators of length {len(generator )}, but requested an effective batch'
                f' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
        audio = randn_tensor(shape , generator=generator , device=self.device , dtype=dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps , device=audio.device )
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(audio , t ).sample
            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output , t , audio ).prev_sample
        audio = audio.clamp(-1 , 1 ).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]
        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio )
| 232
|
def naive_cut_rod_recursive(n: int, prices: list) -> float:
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(
            max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))
    return max_revue


def top_down_cut_rod(n: int, prices: list) -> float:
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list) -> float:
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue, prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev), )
        max_rev[n] = max_revenue
        return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list) -> float:
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list) -> None:
    if n < 0:
        msg = f'n must be greater than or equal to 0. Got n = {n}'
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f'Got n = {n} but length of prices = {len(prices)}'
        )
        raise ValueError(msg)


def main() -> None:
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
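A quick usage example with the classic CLRS price table: for a rod of length 4 the best revenue is 10, achieved with two pieces of length 2 (5 + 5):

prices = [1, 5, 8, 9]  # price of rods of length 1..4
print(bottom_up_cut_rod(4, prices))  # 10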
| 232
| 1
|
'''simple docstring'''
def hamming_distance(string1: str, string2: str) -> int:
    if len(string1) != len(string2):
        raise ValueError("""String lengths must match!""" )
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
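Example usage, including the textbook 'karolin'/'kathrin' pair:

print(hamming_distance("karolin", "kathrin"))  # 3
print(hamming_distance("0000", "0101"))        # 2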
| 41
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool( PipelineTool ):
    '''simple docstring'''
    default_checkpoint = '''naver-clova-ix/donut-base-finetuned-docvqa'''
    description = (
        '''This is a tool that answers a question about an document (pdf). It takes an input named `document` which '''
        '''should be the document containing the information, as well as a `question` that is the question about the '''
        '''document. It returns a text that contains the answer to the question.'''
    )
    name = '''document_qa'''
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel
    inputs = ['''image''', '''text''']
    outputs = ['''text''']

    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        if not is_vision_available():
            raise ValueError("""Pillow must be installed to use the DocumentQuestionAnsweringTool.""" )
        super().__init__(*args , **kwargs )

    def encode( self , document : "Image" , question : str ):
        """simple docstring"""
        task_prompt = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
        prompt = task_prompt.replace("""{user_input}""" , question )
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt , add_special_tokens=False , return_tensors="""pt""" ).input_ids
        pixel_values = self.pre_processor(document , return_tensors="""pt""" ).pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward( self , inputs ):
        """simple docstring"""
        return self.model.generate(
            inputs["""pixel_values"""].to(self.device ) , decoder_input_ids=inputs["""decoder_input_ids"""].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=True , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=True , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=True , ).sequences

    def decode( self , outputs ):
        """simple docstring"""
        sequence = self.pre_processor.batch_decode(outputs )[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token , """""" )
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token , """""" )
        sequence = re.sub(r"""<.*?>""" , """""" , sequence , count=1 ).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence )
        return sequence["answer"]
| 233
| 0
|
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)


def load_flax_checkpoint_in_pytorch_model( pt_model , model_file ):
    '''simple docstring'''
    try:
        with open(model_file , """rb""" ) as flax_state_f:
            flax_state = from_bytes(None , flax_state_f.read() )
except UnpicklingError as e:
try:
            with open(model_file ) as f:
if f.read().startswith("""version""" ):
raise OSError(
"""You seem to have cloned a repository without having git-lfs installed. Please"""
""" install git-lfs and run `git lfs install` followed by `git lfs pull` in the"""
""" folder you cloned.""" )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F'''Unable to convert {model_file} to Flax deserializable object. ''' )
    return load_flax_weights_in_pytorch_model(pt_model , flax_state )


def load_flax_weights_in_pytorch_model( pt_model , flax_state ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , flax_state ) ).values()
    if any(is_type_bf16 ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
        flax_state = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , flax_state )
    flax_state_dict = flatten_dict(flax_state , sep=""".""" )
    pt_model_dict = pt_model.state_dict()
    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys() )
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(""".""" )
        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["""weight"""]
            flax_tensor = jnp.transpose(flax_tensor , (3, 2, 0, 1) )
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["""weight"""]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["""weight"""]
        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array ):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("""_0""" , """.0""" )
                    .replace("""_1""" , """.1""" )
                    .replace("""_2""" , """.2""" )
                    .replace("""_3""" , """.3""" )
                    .replace("""_4""" , """.4""" )
                    .replace("""_5""" , """.5""" )
                    .replace("""_6""" , """.6""" )
                    .replace("""_7""" , """.7""" )
                    .replace("""_8""" , """.8""" )
                    .replace("""_9""" , """.9""" )
                )
        flax_key = """.""".join(flax_key_tuple_array )
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
                    f'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor ) if not isinstance(flax_tensor , np.ndarray ) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor )
                # remove from missing keys
                missing_keys.remove(flax_key )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key )
    pt_model.load_state_dict(pt_model_dict )
    # re-transform missing_keys to list
    missing_keys = list(missing_keys )
    if len(unexpected_keys ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
    if len(missing_keys ) > 0:
logger.warning(
F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
""" use it for predictions and inference.""" )
return pt_model
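A tiny NumPy sketch of the dense-kernel convention the loop above handles: a Flax kernel of shape (in_features, out_features) must be transposed to match torch.nn.Linear's (out_features, in_features) weight layout.

import numpy as np

flax_kernel = np.arange(6, dtype=np.float32).reshape(2, 3)  # Flax Dense kernel: (in, out)
pt_weight = flax_kernel.T  # what the loader stores in the PyTorch state dict: (out, in)
x = np.ones((1, 2), dtype=np.float32)
flax_out = x @ flax_kernel  # Flax convention
pt_out = x @ pt_weight.T    # PyTorch Linear convention: x @ W.T
assert np.allclose(flax_out, pt_out)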
| 350
|
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    '''simple docstring'''
    parser = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=lowerCAmelCase_ , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=lowerCAmelCase_ , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=lowerCAmelCase_ )
return parser.parse_args()
def main():
    '''simple docstring'''
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 349
| 0
|
"""simple docstring"""
# Imports
import numpy as np
class IndexCalculation:
    '''simple docstring'''
    def __init__( self , red=None , green=None , blue=None , red_edge=None , nir=None ):
        """simple docstring"""
        self.set_matricies(red=red , green=green , blue=blue , red_edge=red_edge , nir=nir )

    def set_matricies( self , red=None , green=None , blue=None , red_edge=None , nir=None ):
        """simple docstring"""
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation( self , index="" , red=None , green=None , blue=None , red_edge=None , nir=None ):
        """simple docstring"""
        self.set_matricies(red=red , green=green , blue=blue , red_edge=red_edge , nir=nir )
        funcs = {
"ARVI2": self.arvaa,
"CCCI": self.ccci,
"CVI": self.cvi,
"GLI": self.gli,
"NDVI": self.ndvi,
"BNDVI": self.bndvi,
"redEdgeNDVI": self.red_edge_ndvi,
"GNDVI": self.gndvi,
"GBNDVI": self.gbndvi,
"GRNDVI": self.grndvi,
"RBNDVI": self.rbndvi,
"PNDVI": self.pndvi,
"ATSAVI": self.atsavi,
"BWDRVI": self.bwdrvi,
"CIgreen": self.ci_green,
"CIrededge": self.ci_rededge,
"CI": self.ci,
"CTVI": self.ctvi,
"GDVI": self.gdvi,
"EVI": self.evi,
"GEMI": self.gemi,
"GOSAVI": self.gosavi,
"GSAVI": self.gsavi,
"Hue": self.hue,
"IVI": self.ivi,
"IPVI": self.ipvi,
"I": self.i,
"RVI": self.rvi,
"MRVI": self.mrvi,
"MSAVI": self.m_savi,
"NormG": self.norm_g,
"NormNIR": self.norm_nir,
"NormR": self.norm_r,
"NGRDI": self.ngrdi,
"RI": self.ri,
"S": self.s,
"IF": self._if,
"DVI": self.dvi,
"TVI": self.tvi,
"NDRE": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("Index not in the list!" )
return False
def UpperCamelCase__ ( self ):
"""simple docstring"""
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def UpperCamelCase__ ( self ):
"""simple docstring"""
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.nir * (self.red / (self.green**2))
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (self.nir - self.red) / (self.nir + self.red)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (self.nir - self.blue) / (self.nir + self.blue)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (self.redEdge - self.red) / (self.redEdge + self.red)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
    def atsavi( self , a=0.08 , b=1.22 , x=0.03 ):
"""simple docstring"""
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (self.nir / self.green) - 1
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (self.nir / self.redEdge) - 1
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (self.red - self.blue) / self.red
def UpperCamelCase__ ( self ):
"""simple docstring"""
        ndvi = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.nir - self.green
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def UpperCamelCase__ ( self ):
"""simple docstring"""
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
return n * (1 - 0.25 * n) - (self.red - 0.1_25) / (1 - self.red)
    def gosavi( self , y=0.16 ):
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green + y)
    def gsavi( self , n=0.5 ):
"""simple docstring"""
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
    def ivi( self , a=None , b=None ):
"""simple docstring"""
return (self.nir - b) / (a * self.red)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (self.red + self.green + self.blue) / 30.5
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.nir / self.red
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (self.rvi() - 1) / (self.rvi() + 1)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.green / (self.nir + self.red + self.green)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.nir / (self.nir + self.red + self.green)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.red / (self.nir + self.red + self.green)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (self.green - self.red) / (self.green + self.red)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (self.red - self.green) / (self.red + self.green)
def UpperCamelCase__ ( self ):
"""simple docstring"""
        max_value = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
        min_value = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.nir / self.red
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (self.ndvi() + 0.5) ** (1 / 2)
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
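For orientation, here is NDVI computed directly on toy reflectance arrays, using the same formula as the method above (NDVI = (NIR - red) / (NIR + red)):

import numpy as np

nir = np.array([0.6, 0.8])
red = np.array([0.1, 0.2])
print((nir - red) / (nir + red))  # [0.71428571 0.6]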
| 61
|
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel


@require_tf
class TFMBartModelTester:
    '''simple docstring'''
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = '''gelu'''

    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_mbart_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict

    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        '''simple docstring'''
        model = TFMBartModel(config=config ).get_decoder()
        input_ids = inputs_dict['input_ids']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        head_mask = inputs_dict['head_mask']
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output , past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]


def prepare_mbart_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFMBartModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            '''conversational''': TFMBartForConditionalGeneration,
            '''feature-extraction''': TFMBartModel,
            '''summarization''': TFMBartForConditionalGeneration,
            '''text2text-generation''': TFMBartForConditionalGeneration,
            '''translation''': TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        '''simple docstring'''
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = TFMBartModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MBartConfig )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest( unittest.TestCase ):
    '''simple docstring'''
    src_text = [
        ''' UN Chief Says There Is No Military Solution in Syria''',
    ]
    expected_text = [
        '''Şeful ONU declară că nu există o soluţie militară în Siria''',
    ]
    model_name = '''facebook/mbart-large-en-ro'''
    @cached_property
    def tokenizer( self ):
        '''simple docstring'''
        return AutoTokenizer.from_pretrained(self.model_name )
    @cached_property
    def model( self ):
        '''simple docstring'''
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
        return model
    def _assert_generated_batch_equal_expected( self , **tokenizer_kwargs ):
        '''simple docstring'''
        generated_words = self.translate_src_text(**tokenizer_kwargs )
        self.assertListEqual(self.expected_text , generated_words )
    def translate_src_text( self , **tokenizer_kwargs ):
        '''simple docstring'''
        model_inputs = self.tokenizer(self.src_text , **tokenizer_kwargs , return_tensors='tf' )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
        generated_words = self.tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
        return generated_words
    @slow
    def test_batch_generation_en_ro( self ):
        '''simple docstring'''
        self._assert_generated_batch_equal_expected()
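# --- Editor's note: a hedged usage sketch (not part of the original corpus) mirroring
# the integration test above; it needs TensorFlow plus network access to download the
# facebook/mbart-large-en-ro checkpoint, so treat it as illustrative only.
from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-en-ro")
model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-en-ro")
batch = tokenizer([" UN Chief Says There Is No Military Solution in Syria"], return_tensors="tf")
generated = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))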
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever( RagRetriever ):
    def __init__( self , config , question_encoder_tokenizer , generator_tokenizer , index=None ) -> None:
        """simple docstring"""
        super().__init__(
            config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
        self.process_group = None
    def init_retrieval( self , distributed_port ) -> None:
        """simple docstring"""
        logger.info("""initializing retrieval""" )
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("""dist initialized""" )
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1 )
            self.process_group = dist.new_group(ranks=None , backend="""gloo""" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("""dist not initialized / main""" )
self.index.init_index()
# all processes wait untill the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
    def _is_main( self ) -> bool:
        """simple docstring"""
        return dist.get_rank(group=self.process_group ) == 0
    def _scattered( self , scatter_list , target_shape , target_type=torch.float32 ):
        """simple docstring"""
        target_tensor = torch.empty(target_shape , dtype=target_type )
        dist.scatter(target_tensor , src=0 , scatter_list=scatter_list , group=self.process_group )
        return target_tensor
    def _infer_socket_ifname( self ):
        """simple docstring"""
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("""e""" )) , None )
        return ifname
    def retrieve( self , question_hidden_states : np.ndarray , n_docs : int ) -> Tuple[np.ndarray, List[dict]]:
        """simple docstring"""
        # single GPU training
        if not dist.is_initialized():
            doc_ids , retrieved_doc_embeds = self._main_retrieve(question_hidden_states , n_docs )
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids )
        # distributed training
        world_size = dist.get_world_size(group=self.process_group )
        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape , dtype=torch.float32 ) for _ in range(world_size )]
        dist.gather(torch.tensor(question_hidden_states ) , dst=0 , gather_list=gather_list , group=self.process_group )
        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list ) == world_size
            ids , vectors = self._main_retrieve(torch.cat(gather_list ).numpy() , n_docs )
            ids , vectors = torch.tensor(ids ), torch.tensor(vectors )
            scatter_ids = self._chunk_tensor(ids , n_queries )
            scatter_vectors = self._chunk_tensor(vectors , n_queries )
        doc_ids = self._scattered(scatter_ids , [n_queries, n_docs] , target_type=torch.int64 )
        retrieved_doc_embeds = self._scattered(scatter_vectors , [n_queries, n_docs, question_hidden_states.shape[1]] )
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids )
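# --- Editor's note: a hedged, single-process sketch (not from the original file) of the
# gather/scatter bookkeeping used by retrieve() above: each worker contributes a
# [n_queries, dim] block, rank 0 concatenates, retrieves, then chunks results back out.
import torch

world_size, n_queries, dim, n_docs = 2, 3, 4, 5
blocks = [torch.randn(n_queries, dim) for _ in range(world_size)]   # what gather collects
all_queries = torch.cat(blocks)                                     # [world_size * n_queries, dim]
doc_ids = torch.randint(0, 100, (all_queries.shape[0], n_docs))     # stand-in for _main_retrieve
per_worker = doc_ids.split(n_queries)                               # what scatter sends back
assert per_worker[0].shape == (n_queries, n_docs)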
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
class BartTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = BartTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("""type""" ) )
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = """post_processor"""
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["""sep"""] = tuple(state["""sep"""] )
            if "cls" in state:
                state["""cls"""] = tuple(state["""cls"""] )
            changes_to_apply = False
            if state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
                state["""add_prefix_space"""] = add_prefix_space
                changes_to_apply = True
            if state.get("""trim_offsets""" , trim_offsets ) != trim_offsets:
                state["""trim_offsets"""] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop("""type""" ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
    @property
    def mask_token( self ) -> str:
        """simple docstring"""
        if self._mask_token is None:
            if self.verbose:
                logger.error("""Using mask_token, but it is not set yet.""" )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def mask_token( self , value ) -> None:
        """simple docstring"""
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                """to use it with pretokenized inputs.""" )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                """to use it with pretokenized inputs.""" )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
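# --- Editor's note: a hedged sketch (not from the original file) of the special-token
# layout produced by build_inputs_with_special_tokens above, using integer stand-ins
# (0 = <s>, 2 = </s>) instead of a real vocabulary.
bos, eos = 0, 2
seq_a, seq_b = [10, 11], [20, 21]
single = [bos] + seq_a + [eos]           # <s> A </s>
pair = single + [eos] + seq_b + [eos]    # <s> A </s> </s> B </s>
assert pair == [0, 10, 11, 2, 2, 20, 21, 2]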
from copy import deepcopy
class BinaryIndexedTree:
    def __init__(self, arr=None, size=None):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError('''Either arr or size must be specified''' )
    def init(self, arr):
        self.size = len(arr )
        self.tree = deepcopy(arr )
        for i in range(1, self.size ):
            j = self.next_(i )
            if j < self.size:
                self.tree[j] += self.tree[i]
    def get_array(self):
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1 ):
            j = self.next_(i )
            if j < self.size:
                arr[j] -= arr[i]
        return arr
    @staticmethod
    def next_(index):
        return index + (index & (-index))
    @staticmethod
    def prev(index):
        return index - (index & (-index))
    def add(self, index, value):
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index )
    def update(self, index, value):
        self.add(index, value - self.get(index ) )
    def prefix(self, right):
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right )
        return result
    def query(self, left, right):
        return self.prefix(right ) - self.prefix(left )
    def get(self, index):
        return self.query(index, index + 1 )
    def rank_query(self, value):
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
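# --- Editor's note: a hedged usage sketch (not from the original file) for the Fenwick
# tree above; assumes the BinaryIndexedTree class defined above is in scope.
ft = BinaryIndexedTree(arr=[1, 2, 3, 4, 5])
assert ft.prefix(3) == 1 + 2 + 3        # sum of arr[0:3]
assert ft.query(1, 4) == 2 + 3 + 4      # sum of arr[1:4]
ft.update(2, 10)                        # arr becomes [1, 2, 10, 4, 5]
assert ft.get(2) == 10
assert ft.get_array() == [1, 2, 10, 4, 5]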
'''simple docstring'''
import random
class Onepad:
    @staticmethod
    def encrypt( text : str ) -> tuple[list[int], list[int]]:
        plain = [ord(i ) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1 , 300 )
            c = (i + k) * k
            cipher.append(c )
            key.append(k )
        return cipher, key
    @staticmethod
    def decrypt( cipher : list[int] , key : list[int] ) -> str:
        plain = []
        for i in range(len(key ) ):
            p = int((cipher[i] - (key[i]) ** 2) / key[i] )
            plain.append(chr(p ) )
        return "".join(plain )
if __name__ == "__main__":
    c, k = Onepad().encrypt('Hello')
print(c, k)
print(Onepad().decrypt(c, k))
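# --- Editor's note: a hedged round-trip check (not from the original file) for the
# one-time-pad class above; assumes Onepad as defined above is in scope. Since
# c = (p + k) * k, decryption recovers p via (c - k**2) / k.
c, k = Onepad().encrypt("Hello")
assert Onepad().decrypt(c, k) == "Hello"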
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
modified_files = (
    subprocess.check_output(F'git diff --diff-filter=d --name-only {fork_point_sha}'.split()).decode('utf-8').split()
)
joined_dirs = '|'.join(sys.argv[1:])
regex = re.compile(RF'^({joined_dirs}).*?\.py$')
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb'] = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb_fast'] = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType( ExplicitEnum ):
    '''simple docstring'''
    CHARACTER = 'char'
    BPE = 'bpe'
    WORDPIECE = 'wp'
SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor( ProcessorMixin ):
    '''simple docstring'''
    attributes = ['image_processor', 'char_tokenizer']
    image_processor_class = 'ViTImageProcessor'
    char_tokenizer_class = 'MgpstrTokenizer'
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ) -> None:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2" )
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased" )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process." )
        if images is not None:
            inputs = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None:
            encodings = self.char_tokenizer(text , return_tensors=return_tensors , **kwargs )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode( self , sequences ):
        char_preds , bpe_preds , wp_preds = sequences
        batch_size = char_preds.size(0 )
        char_strs , char_scores = self._decode_helper(char_preds , "char" )
        bpe_strs , bpe_scores = self._decode_helper(bpe_preds , "bpe" )
        wp_strs , wp_scores = self._decode_helper(wp_preds , "wp" )
        final_strs = []
        final_scores = []
        for i in range(batch_size ):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores ) )
            final_strs.append(strs[max_score_index] )
            final_scores.append(scores[max_score_index] )
        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out
    def _decode_helper( self , pred_logits , format ):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f'Format {format} is not supported.' )
        dec_strs , conf_scores = [], []
        batch_size = pred_logits.size(0 )
        batch_max_length = pred_logits.size(1 )
        _ , preds_index = pred_logits.topk(1 , dim=-1 , largest=True , sorted=True )
        preds_index = preds_index.view(-1 , batch_max_length )[:, 1:]
        preds_str = decoder(preds_index )
        preds_max_prob , _ = torch.nn.functional.softmax(pred_logits , dim=2 ).max(dim=2 )
        preds_max_prob = preds_max_prob[:, 1:]
        for index in range(batch_size ):
            pred_eos = preds_str[index].find(eos_str )
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token ) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred )
            conf_scores.append(confidence_score )
        return dec_strs, conf_scores
    def char_decode( self , sequences ):
        decode_strs = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(sequences )]
        return decode_strs
    def bpe_decode( self , sequences ):
        return self.bpe_tokenizer.batch_decode(sequences )
    def wp_decode( self , sequences ):
        decode_strs = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(sequences )]
        return decode_strs
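# --- Editor's note: a hedged, standalone sketch (not from the original file) of the
# best-of-three selection in batch_decode above: per sample, keep the candidate string
# whose decoding head produced the highest confidence score.
char_out = [("hello", 0.91), ("wrold", 0.85)]   # (string, score) per sample, per head
bpe_out = [("hallo", 0.72), ("world", 0.93)]
wp_out = [("hullo", 0.64), ("word", 0.60)]
final = []
for cands in zip(char_out, bpe_out, wp_out):
    final.append(max(cands, key=lambda sc: sc[1])[0])
assert final == ["hello", "world"]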
from __future__ import annotations
def solve_maze( maze : list[list[int]]) -> bool:
    '''simple docstring'''
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze , 0 , 0 , solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved
def run_maze( maze : list[list[int]] , i : int , j : int , solutions : list[list[int]]) -> bool:
    '''simple docstring'''
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze , i + 1 , j , solutions)
                or run_maze(maze , i , j + 1 , solutions)
                or run_maze(maze , i - 1 , j , solutions)
                or run_maze(maze , i , j - 1 , solutions)
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
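# --- Editor's note: a hedged demo (not from the original file) for the backtracking
# solver above; assumes solve_maze as defined above. 0 = open cell, 1 = wall.
demo_maze = [
    [0, 1, 0],
    [0, 1, 0],
    [0, 0, 0],
]
assert solve_maze(demo_maze)  # prints the 0/1 path matrix and returns True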
lowercase : int = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
notebook_first_cells = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
black_avoid_patterns = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json""",
},
"""added_tokens.json""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json""",
},
"""merges_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""RUCAIBox/mvp""": 1_0_2_4,
}
class MvpTokenizerFast( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = MvpTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space' , add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'])
            if "cls" in state:
                state['cls'] = tuple(state['cls'])
            changes_to_apply = False
            if state.get('add_prefix_space' , add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets' , trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer , tokenizer_component , new_value)
    @property
    def mask_token( self ) -> str:
        '''simple docstring'''
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)
    @mask_token.setter
    def mask_token( self , value ) -> None:
        '''simple docstring'''
        value = AddedToken(value , lstrip=True , rstrip=False) if isinstance(value , str) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get('is_split_into_words' , False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                'to use it with pretokenized inputs.')
        return super()._batch_encode_plus(*args , **kwargs)
    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get('is_split_into_words' , False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                'to use it with pretokenized inputs.')
        return super()._encode_plus(*args , **kwargs)
    def save_vocabulary( self , save_directory , filename_prefix = None) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None):
        '''simple docstring'''
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'tokenizer_file': {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'gpt-neox-20b': 2048,
}
class GPTNeoXTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("""type""" ) )
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def _build_conversation_input_ids( self , conversation ):
        '''simple docstring'''
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
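# --- Editor's note: a hedged, standalone sketch (not from the original file) of the
# conversation flattening in _build_conversation_input_ids above: every turn is
# encoded, an EOS id is appended, and the result is left-truncated to the window.
def flatten_turns(turns, encode, eos_id, max_length):
    ids = []
    for text in turns:
        ids.extend(encode(text) + [eos_id])
    return ids[-max_length:] if len(ids) > max_length else ids

toy_encode = lambda s: [ord(c) for c in s]  # stand-in for tokenizer.encode
out = flatten_turns(["hi", "yo"], toy_encode, eos_id=0, max_length=4)
assert out == [0, 121, 111, 0]  # the oldest tokens are dropped first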
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__( self , parent , ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = """gelu"""
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDistilBertModel(config=config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_distilbert_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDistilBertForMaskedLM(config=config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_distilbert_for_question_answering( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDistilBertForQuestionAnswering(config=config )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_distilbert_for_sequence_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_distilbert_for_multiple_choice( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            """input_ids""": multiple_choice_inputs_ids,
            """attention_mask""": multiple_choice_input_mask,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_distilbert_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            '''feature-extraction''': TFDistilBertModel,
            '''fill-mask''': TFDistilBertForMaskedLM,
            '''question-answering''': TFDistilBertForQuestionAnswering,
            '''text-classification''': TFDistilBertForSequenceClassification,
            '''token-classification''': TFDistilBertForTokenClassification,
            '''zero-shot''': TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFDistilBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DistilBertConfig , dim=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_distilbert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
            model = TFDistilBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class TFDistilBertModelIntegrationTest( unittest.TestCase):
    @slow
    def test_inference_masked_lm( self ):
        model = TFDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [
                [
                    [0.19_261_885, -0.13_732_955, 0.4_119_799],
                    [0.22_150_156, -0.07_422_661, 0.39_037_204],
                    [0.22_756_018, -0.0_896_414, 0.3_701_467],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
import math
import unittest
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
assert isinstance(__snake_case , __snake_case ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(__snake_case ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
    def test_primes( self ) -> None:
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(1_1 ) )
self.assertTrue(is_prime(1_3 ) )
self.assertTrue(is_prime(1_7 ) )
self.assertTrue(is_prime(1_9 ) )
self.assertTrue(is_prime(2_3 ) )
self.assertTrue(is_prime(2_9 ) )
    def test_not_primes( self ) -> None:
        with self.assertRaises(AssertionError ):
is_prime(-1_9 )
self.assertFalse(
is_prime(0 ) , 'Zero doesn\'t have any positive factors, primes must have exactly two.' , )
self.assertFalse(
is_prime(1 ) , 'One only has 1 positive factor, primes must have exactly two.' , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
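# --- Editor's note: a hedged check (not from the original file) of the 6k +/- 1 step
# used by is_prime above: any n can be written 6k + r with r in 0..5; r in {0, 2, 4}
# gives an even number and r == 3 a multiple of 3, so primes > 3 leave only r in {1, 5}.
assert all(r % 2 == 0 or r % 3 == 0 for r in (0, 2, 3, 4))
assert {p % 6 for p in (5, 7, 11, 13, 17, 19, 23, 29)} == {1, 5}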
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch( tf_checkpoint_path , rembert_config_file , pytorch_dump_path ):
    '''simple docstring'''
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file )
    print('Building PyTorch model from configuration: {}'.format(str(config ) ) )
    model = RemBertModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print('Save PyTorch model to {}'.format(pytorch_dump_path ) )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
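# --- Editor's note: a hedged example invocation (not from the original file); the
# three paths below are placeholders, not real checkpoints.
# python convert_rembert_tf_checkpoint_to_pytorch.py \
#   --tf_checkpoint_path /path/to/rembert/ckpt \
#   --rembert_config_file /path/to/rembert_config.json \
#   --pytorch_dump_path /path/to/pytorch_model.bin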
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever( RagRetriever ):
    """simple docstring"""
    def __init__( self , config , question_encoder_tokenizer , generator_tokenizer , index=None):
        super().__init__(
            config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
        self.process_group = None
    def init_retrieval( self , distributed_port : int):
        logger.info("initializing retrieval")
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None , backend="gloo")
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("dist not initialized / main")
self.index.init_index()
# all processes wait untill the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group)
    def _is_main( self ):
        return dist.get_rank(group=self.process_group) == 0
    def _scattered( self , scatter_list , target_shape , target_type=torch.float32):
        target_tensor = torch.empty(target_shape , dtype=target_type)
        dist.scatter(target_tensor , src=0 , scatter_list=scatter_list , group=self.process_group)
        return target_tensor
    def _infer_socket_ifname( self ):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")) , None)
        return ifname
    def retrieve( self , question_hidden_states : np.ndarray , n_docs : int):
        # single GPU training
        if not dist.is_initialized():
            doc_ids , retrieved_doc_embeds = self._main_retrieve(question_hidden_states , n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
        # distributed training
        world_size = dist.get_world_size(group=self.process_group)
        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape , dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states) , dst=0 , gather_list=gather_list , group=self.process_group)
        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids , vectors = self._main_retrieve(torch.cat(gather_list).numpy() , n_docs)
            ids , vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids , n_queries)
            scatter_vectors = self._chunk_tensor(vectors , n_queries)
        doc_ids = self._scattered(scatter_ids , [n_queries, n_docs] , target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors , [n_queries, n_docs, question_hidden_states.shape[1]])
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
def snake_to_camel_case( input_str , use_pascal = False ):
    if not isinstance(input_str , str ):
        msg = f"Expected string as input, found {type(input_str )}"
        raise ValueError(msg )
    if not isinstance(use_pascal , bool ):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal )}"
        raise ValueError(msg )
    words = input_str.split("_" )
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
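# --- Editor's note: a hedged demo (not from the original file) for the converter
# above; assumes snake_to_camel_case as defined above is in scope.
assert snake_to_camel_case("some_random_string") == "someRandomString"
assert snake_to_camel_case("some_random_string", use_pascal=True) == "SomeRandomString"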
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders( accelerator : Accelerator , batch_size : int = 16 ):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    datasets = load_dataset("""glue""" , """mrpc""" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="""longest""" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="""pt""" , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function( config , args ):
    """simple docstring"""
    if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , None ) == "1":
        config["""num_epochs"""] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="""all""" , project_dir=args.project_dir )
    else:
        accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size )
    metric = evaluate.load("""glue""" , """mrpc""" )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__ )[-1].split(""".""" )[0]
        accelerator.init_trackers(run , config )
# Now we train the model
for epoch in range(__a ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
A__ = 0
for step, batch in enumerate(__a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
A__ = model(**__a )
A__ = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
A__ = loss / gradient_accumulation_steps
accelerator.backward(__a )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
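            # Note on the scaling above (added for clarity): dividing each loss by
            # gradient_accumulation_steps before backward() makes the gradients summed over
            # the accumulation window an average, so one optimizer.step() every N batches
            # behaves like a single step on one N-times-larger batch.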
model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]) )
            metric.add_batch(
                predictions=predictions , references=references , )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:" , eval_metric )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader ),
                    "epoch": epoch,
                } , step=epoch , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script." )
    parser.add_argument(
        "--mixed_precision" , type=str , default=None , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU." , )
    parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
    parser.add_argument(
        "--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
    parser.add_argument(
        "--project_dir" , type=str , default="logs" , help="Location on where to store experiment tracking logs and relevant project information" , )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 350
|
import math
def main() -> None:
    message = input("Enter message: " )
    key = int(input(f"Enter key [2-{len(message ) - 1}]: " ) )
    mode = input("Encryption/Decryption [e/d]: " )

    if mode.lower().startswith("e" ):
        text = encrypt_message(key , message )
    elif mode.lower().startswith("d" ):
        text = decrypt_message(key , message )

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}" )
def encrypt_message(key: int , message: str ) -> str:
    """Encrypt `message` with a columnar transposition cipher of width `key`."""
    cipher_text = [""] * key
    for col in range(key ):
        pointer = col
        while pointer < len(message ):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text )
def decrypt_message(key: int , message: str ) -> str:
    """Invert the transposition cipher by filling the grid column by column."""
    num_cols = math.ceil(len(message ) / key )
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message )
    plain_text = [""] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
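# Illustrative round trip (added example; values hand-computed from the functions above):
#   encrypt_message(8, "Common sense is not so common.")
#     -> 'Cenoonommstmme oo snnio. s s c'
#   decrypt_message(8, 'Cenoonommstmme oo snnio. s s c')
#     -> 'Common sense is not so common.'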
| 276
| 0
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator , batch_size: int = 16 , model_name_or_path: str = "bert-base-cased" ):
    """Build train/eval dataloaders for GLUE MRPC."""
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path )
    datasets = load_dataset("glue" , "mrpc" )

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=False )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label" , "labels" )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding="max_length" , max_length=128 , return_tensors="pt" )
        return tokenizer.pad(examples , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
return train_dataloader, eval_dataloader
def training_function(config , args ):
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"] )
    seed = int(config["seed"] )
    batch_size = int(config["batch_size"] )
    model_name_or_path = args.model_name_or_path
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size , model_name_or_path )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path , return_dict=True )

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue" , "mrpc" )
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch , num_epochs ):
model.train()
        for step, batch in enumerate(train_dataloader ):
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"]) )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader ) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    references = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    samples_seen += references.shape[0]
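            # Worked example of the deduplication above (illustrative): 9 eval samples on
            # 2 processes are padded to 10, i.e. 5 per process. With batch size 4 the first
            # gathered batch holds 8 real predictions (samples_seen = 8); the last gathered
            # batch holds 2, of which only len(dataset) - samples_seen = 9 - 8 = 1 is real,
            # so the slice drops the duplicate.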
            metric.add_batch(
                predictions=predictions , references=references , )

        eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , __magic_name__ )
UpperCamelCase :Dict = eval_metric["""accuracy"""]
if best_performance < eval_metric["accuracy"]:
UpperCamelCase :str = eval_metric["""accuracy"""]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , """all_results.json""" ) , """w""" ) as f:
json.dump(__magic_name__ , __magic_name__ )
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
    parser.add_argument(
        "--model_name_or_path" , type=str , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=False , )
    parser.add_argument(
        "--output_dir" , type=str , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
    parser.add_argument(
        "--performance_lower_bound" , type=float , default=None , help="Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value." , )
    parser.add_argument(
        "--num_epochs" , type=int , default=3 , help="Number of train epochs." , )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 38
|
"""simple docstring"""
def generate_large_matrix() -> list[list[int]]:
    """Return a 1000x1000 grid whose rows and columns are sorted in decreasing order."""
    return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid(grid: list[list[int]] ) -> None:
    """Validate that every row and every column of the grid is sorted in decreasing order."""
    assert all(row == sorted(row , reverse=True ) for row in grid )
    assert all(list(col ) == sorted(col , reverse=True ) for col in zip(*grid ) )
def find_negative_index(array: list[int] ) -> int:
    """Binary-search a decreasing row for the index of its first negative number."""
    left = 0
    right = len(array ) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1

    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array )
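# Hand-checked examples for find_negative_index (added for illustration):
#   find_negative_index([4, 3, 2, -1]) == 3   (first negative at index 3)
#   find_negative_index([1, 0, -1, -2]) == 2
#   find_negative_index([5, 4, 3]) == 3       (no negatives -> length of the row)
#   find_negative_index([-1, -2]) == 0        (all negative -> 0)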
def count_negatives_binary_search(grid: list[list[int]] ) -> int:
    """Count negatives with one binary search per row, shrinking the bound as rows descend."""
    total = 0
    bound = len(grid[0] )

    for i in range(len(grid ) ):
        bound = find_negative_index(grid[i][:bound] )
        total += bound

    return (len(grid ) * len(grid[0] )) - total
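# Illustrative check against the first test grid (hand-computed):
#   count_negatives_binary_search([[4, 3, 2, -1], [3, 2, 1, -1],
#                                  [1, 1, -1, -2], [-1, -1, -2, -3]]) == 8
# The rows contribute 1 + 1 + 2 + 4 negatives respectively.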
def count_negatives_brute_force(grid: list[list[int]] ) -> int:
    """Count negatives by scanning every cell."""
    return len([number for row in grid for number in row if number < 0] )


def count_negatives_brute_force_with_break(grid: list[list[int]] ) -> int:
    """Scan each row but stop at the first negative, since rows are sorted decreasing."""
    total = 0
    for row in grid:
        for i, number in enumerate(row ):
            if number < 0:
                total += len(row ) - i
                break
    return total
def benchmark() -> None:
    """Time each implementation against the large generated grid."""
    from timeit import timeit

    print("Running benchmarks" )
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)" , setup=setup , number=500 )
        print(f"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 238
| 0
|
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
a :Any = "0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase ):
    @classmethod
    def setUpClass(cls ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )

    @classmethod
    def tearDownClass(cls ):
        try:
            delete_repo(token=cls._token , repo_id="test-model-flax" )
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token , repo_id="valid_org/test-model-flax-org" )
        except HTTPError:
            pass
    def test_push_to_hub(self ):
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        model = FlaxBertModel(config )
        model.push_to_hub("test-model-flax" , use_auth_token=self._token )

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax" )

        base_params = flatten_dict(unfreeze(model.params ) )
        new_params = flatten_dict(unfreeze(new_model.params ) )

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff , 1e-3 , msg=f"{key} not identical" )

        # Reset repo
        delete_repo(token=self._token , repo_id="test-model-flax" )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir , repo_id="test-model-flax" , push_to_hub=True , use_auth_token=self._token )

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax" )

        base_params = flatten_dict(unfreeze(model.params ) )
        new_params = flatten_dict(unfreeze(new_model.params ) )

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff , 1e-3 , msg=f"{key} not identical" )
    def test_push_to_hub_in_organization(self ):
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        model = FlaxBertModel(config )
        model.push_to_hub("valid_org/test-model-flax-org" , use_auth_token=self._token )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" )

        base_params = flatten_dict(unfreeze(model.params ) )
        new_params = flatten_dict(unfreeze(new_model.params ) )

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff , 1e-3 , msg=f"{key} not identical" )

        # Reset repo
        delete_repo(token=self._token , repo_id="valid_org/test-model-flax-org" )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir , repo_id="valid_org/test-model-flax-org" , push_to_hub=True , use_auth_token=self._token )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" )

        base_params = flatten_dict(unfreeze(model.params ) )
        new_params = flatten_dict(unfreeze(new_model.params ) )

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff , 1e-3 , msg=f"{key} not identical" )
def check_models_equal(model_1 , model_2 ) -> bool:
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params )
    flat_params_2 = flatten_dict(model_2.params )
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key] ) ) > 1e-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class FlaxModelUtilsTest(unittest.TestCase ):
    def test_model_from_pretrained_subfolder(self ):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
        model = FlaxBertModel(config )
        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir , subfolder ) )

            with self.assertRaises(OSError ):
                _ = FlaxBertModel.from_pretrained(tmp_dir )

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir , subfolder=subfolder )
        self.assertTrue(check_models_equal(model , model_loaded ) )
    def test_model_from_pretrained_subfolder_sharded(self ):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
        model = FlaxBertModel(config )
        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir , subfolder ) , max_shard_size="10KB" )

            with self.assertRaises(OSError ):
                _ = FlaxBertModel.from_pretrained(tmp_dir )

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir , subfolder=subfolder )
        self.assertTrue(check_models_equal(model , model_loaded ) )
    def test_model_from_pretrained_hub_subfolder(self ):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"

        with self.assertRaises(OSError ):
            _ = FlaxBertModel.from_pretrained(model_id )

        model = FlaxBertModel.from_pretrained(model_id , subfolder=subfolder )
        self.assertIsNotNone(model )

    def test_model_from_pretrained_hub_subfolder_sharded(self ):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"

        with self.assertRaises(OSError ):
            _ = FlaxBertModel.from_pretrained(model_id )

        model = FlaxBertModel.from_pretrained(model_id , subfolder=subfolder )
        self.assertIsNotNone(model )
| 56
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject ):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self , *args , **kwargs ):
        requires_backends(self , ["transformers", "torch", "note_seq"] )

    @classmethod
    def from_config(cls , *args , **kwargs ):
        requires_backends(cls , ["transformers", "torch", "note_seq"] )

    @classmethod
    def from_pretrained(cls , *args , **kwargs ):
        requires_backends(cls , ["transformers", "torch", "note_seq"] )
| 56
| 1
|
def greatest_common_divisor(a: int , b: int ) -> int:
    """Euclid's algorithm, recursive form."""
    return abs(b ) if a == 0 else greatest_common_divisor(b % a , a )


def gcd_by_iterative(x: int , y: int ) -> int:
    """Euclid's algorithm, iterative form."""
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x )
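# Worked Euclid trace (added example): gcd_by_iterative(120, 84)
#   (x, y): (120, 84) -> (84, 36) -> (36, 12) -> (12, 0)  => 12
# greatest_common_divisor(120, 84) reaches the same value recursively.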
def main():
    """Call the GCD functions on user input."""
    try:
        nums = input('Enter two integers separated by comma (,): ' ).split(',' )
        num_1 = int(nums[0] )
        num_2 = int(nums[1] )
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1 , num_2 )}" )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1 , num_2 )}" )
    except (IndexError, UnboundLocalError, ValueError):
        print('Wrong input' )
if __name__ == "__main__":
main()
| 225
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase ):
    def setUp(self ):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self ):
        image_processor = AutoImageProcessor.from_pretrained('openai/clip-vit-base-patch32' )
        self.assertIsInstance(image_processor , CLIPImageProcessor )

    def test_image_processor_from_local_directory_from_key(self ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname ) / 'preprocessor_config.json'
            config_tmpfile = Path(tmpdirname ) / 'config.json'
            json.dump(
                {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(processor_tmpfile , 'w' ) , )
            json.dump({'model_type': 'clip'} , open(config_tmpfile , 'w' ) )

            image_processor = AutoImageProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(image_processor , CLIPImageProcessor )
    def test_image_processor_from_local_directory_from_feature_extractor_key(self ):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname ) / 'preprocessor_config.json'
            config_tmpfile = Path(tmpdirname ) / 'config.json'
            json.dump(
                {'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(processor_tmpfile , 'w' ) , )
            json.dump({'model_type': 'clip'} , open(config_tmpfile , 'w' ) )

            image_processor = AutoImageProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(image_processor , CLIPImageProcessor )

    def test_image_processor_from_local_directory_from_config(self ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_processor_type
            processor_tmpfile = Path(tmpdirname ) / 'preprocessor_config.json'
            config_tmpfile = Path(tmpdirname ) / 'config.json'
            json.dump(
                {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(processor_tmpfile , 'w' ) , )
            json.dump({'model_type': 'clip'} , open(config_tmpfile , 'w' ) )

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname ).to_dict()

            config_dict.pop('image_processor_type' )
            config = CLIPImageProcessor(**config_dict )

            # save in new folder
            model_config.save_pretrained(tmpdirname )
            config.save_pretrained(tmpdirname )

            image_processor = AutoImageProcessor.from_pretrained(tmpdirname )

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string() )
            self.assertTrue('_processor_class' not in dict_as_saved )

            self.assertIsInstance(image_processor , CLIPImageProcessor )
    def test_image_processor_from_local_file(self ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname ) / 'preprocessor_config.json'
            json.dump(
                {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(processor_tmpfile , 'w' ) , )

            image_processor = AutoImageProcessor.from_pretrained(processor_tmpfile )
            self.assertIsInstance(image_processor , CLIPImageProcessor )

    def test_repo_not_found(self ):
        with self.assertRaisesRegex(
            EnvironmentError , 'clip-base is not a local folder and is not a valid model identifier' ):
            _ = AutoImageProcessor.from_pretrained('clip-base' )

    def test_revision_not_found(self ):
        with self.assertRaisesRegex(
            EnvironmentError , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision='aaaaaa' )

    def test_image_processor_not_found(self ):
        with self.assertRaisesRegex(
            EnvironmentError , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
            _ = AutoImageProcessor.from_pretrained('hf-internal-testing/config-no-model' )
    def test_from_pretrained_dynamic_image_processor(self ):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError ):
            image_processor = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError ):
            image_processor = AutoImageProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=False )

        image_processor = AutoImageProcessor.from_pretrained(
            'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=True )
        self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir )
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir , trust_remote_code=True )
        self.assertEqual(reloaded_image_processor.__class__.__name__ , 'NewImageProcessor' )
    def test_new_image_processor_registration(self ):
        try:
            AutoConfig.register('custom' , CustomConfig )
            AutoImageProcessor.register(CustomConfig , CustomImageProcessor )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError ):
                AutoImageProcessor.register(CLIPConfig , CLIPImageProcessor )

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname ) / 'preprocessor_config.json'
                config_tmpfile = Path(tmpdirname ) / 'config.json'
                json.dump(
                    {'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(processor_tmpfile , 'w' ) , )
                json.dump({'model_type': 'clip'} , open(config_tmpfile , 'w' ) )

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname )

                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir )
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir )
                    self.assertIsInstance(new_image_processor , CustomImageProcessor )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_image_processor_conflict(self ):
        class NewImageProcessor(CLIPImageProcessor ):
            is_local = True

        try:
            AutoConfig.register('custom' , CustomConfig )
            AutoImageProcessor.register(CustomConfig , NewImageProcessor )
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' )
            self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
            self.assertTrue(image_processor.is_local )

            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=False )
            self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
            self.assertTrue(image_processor.is_local )

            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=True )
            self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
            self.assertTrue(not hasattr(image_processor , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 225
| 1
|
"""simple docstring"""
def prefix_function(input_string: str ) -> list:
    """For each position, the length of the longest proper prefix that is also a suffix."""
    prefix_result = [0] * len(input_string )
    for i in range(1 , len(input_string ) ):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str ) -> int:
    """Return the largest value of the prefix function."""
    return max(prefix_function(input_str ) )
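# Hand-computed example (added for illustration):
#   prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4]
#   longest_prefix("aabcdaabc") == 4   ("aabc" is both a proper prefix and a suffix)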
if __name__ == "__main__":
import doctest
doctest.testmod()
| 302
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    task_type: Optional[str] = field(
        default="NER" , metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    use_fast: bool = field(default=False , metadata={"help": "Set this flag to use fast tokenization."} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."} )
    labels: Optional[str] = field(
        default=None , metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."} ,)
    max_seq_length: int = field(
        default=128 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } ,)
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
    module = import_module('tasks' )
    try:
        token_classification_task_clazz = getattr(module , model_args.task_type )
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __lowerCamelCase )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels )
    label_map: Dict[int, str] = dict(enumerate(labels ) )
    num_labels = len(labels )

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , id2label=label_map , label2id={label: i for i, label in enumerate(labels )} , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions: np.ndarray , label_ids: np.ndarray ) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions , axis=2 )
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size )]
        preds_list = [[] for _ in range(batch_size )]

        for i in range(batch_size ):
            for j in range(seq_len ):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )

        return preds_list, out_label_list
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions , p.label_ids )
        return {
            "accuracy_score": accuracy_score(out_label_list , preds_list ),
            "precision": precision_score(out_label_list , preds_list ),
            "recall": recall_score(out_label_list , preds_list ),
            "f1": f1_score(out_label_list , preds_list ),
        }
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir , 'eval_results.txt' )
        if trainer.is_world_process_zero():
            with open(output_eval_file , 'w' ) as writer:
                logger.info('***** Eval results *****' )
                for key, value in result.items():
                    logger.info('  %s = %s' , key , value )
                    writer.write('%s = %s\n' % (key, value) )

            results.update(result )
# Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )

        predictions, label_ids, metrics = trainer.predict(test_dataset )
        preds_list, _ = align_predictions(predictions , label_ids )

        output_test_results_file = os.path.join(training_args.output_dir , 'test_results.txt' )
        if trainer.is_world_process_zero():
            with open(output_test_results_file , 'w' ) as writer:
                for key, value in metrics.items():
                    logger.info('  %s = %s' , key , value )
                    writer.write('%s = %s\n' % (key, value) )

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir , 'test_predictions.txt' )
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file , 'w' ) as writer:
                with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f:
                    token_classification_task.write_predictions_to_file(writer , f , preds_list )
return results
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 302
| 1
|
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
    '\\nname: ""\nallow_empty: false\nallow_empty_text: true\nsubsections:\n  - name: "Dataset Card for X"  # First-level markdown heading\n    allow_empty: false\n    allow_empty_text: true\n    subsections:\n      - name: "Table of Contents"\n        allow_empty: false\n        allow_empty_text: false\n        subsections: null\n      - name: "Dataset Description"\n        allow_empty: false\n        allow_empty_text: false\n        subsections:\n          - name: "Dataset Summary"\n            allow_empty: false\n            allow_empty_text: false\n            subsections: null\n          - name: "Supported Tasks and Leaderboards"\n            allow_empty: true\n            allow_empty_text: true\n            subsections: null\n          - name: Languages\n            allow_empty: false\n            allow_empty_text: true\n            subsections: null\n'
)
CORRECT_DICT = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
README_CORRECT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
README_CORRECT_FOUR_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
CORRECT_DICT_FOUR_LEVEL = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Extra Ignored Subsection',
'text': '',
'is_empty_text': True,
'subsections': [],
}
],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
README_EMPTY_YAML = '\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_EMPTY_YAML = (
    'The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'
)
README_NO_YAML = '\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_NO_YAML = (
    'The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'
)
README_INCORRECT_YAML = '\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_INCORRECT_YAML = 'The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'
README_MISSING_TEXT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MISSING_TEXT = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'
README_NONE_SUBSECTION = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n'
EXPECTED_ERROR_README_NONE_SUBSECTION = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'
README_MISSING_SUBSECTION = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MISSING_SUBSECTION = 'The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'
README_MISSING_CONTENT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n'
EXPECTED_ERROR_README_MISSING_CONTENT = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'
README_MISSING_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'
README_MULTIPLE_WRONG_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n'
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'
README_WRONG_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'
README_EMPTY = ''
EXPECTED_ERROR_README_EMPTY = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'
README_MULTIPLE_SAME_HEADING_1 = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = 'The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'
@pytest.mark.parametrize(
"readme_md, expected_dict", [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
], )
def test_readme_from_string_correct(readme_md , expected_dict ):
    assert ReadMe.from_string(readme_md , example_yaml_structure ).to_dict() == expected_dict
@pytest.mark.parametrize(
"readme_md, expected_error", [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
], )
def test_readme_from_string_validation_fails(readme_md , expected_error ):
    with pytest.raises(ValueError , match=re.escape(expected_error.format(path="root" ) ) ):
        readme = ReadMe.from_string(readme_md , example_yaml_structure )
        readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error", [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
], )
def test_readme_from_string_parsing_errors_fails(readme_md , expected_error ):
    with pytest.raises(ValueError , match=re.escape(expected_error.format(path="root" ) ) ):
        ReadMe.from_string(readme_md , example_yaml_structure )
@pytest.mark.parametrize(
"readme_md,", [
(README_MULTIPLE_SAME_HEADING_1),
], )
def test_readme_from_string_suppress_parsing_errors(readme_md ):
    ReadMe.from_string(readme_md , example_yaml_structure , suppress_parsing_errors=True )
@pytest.mark.parametrize(
"readme_md, expected_dict", [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
], )
def test_readme_from_readme_correct(readme_md , expected_dict ):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / "README.md"
        with open(path , "w+" ) as readme_file:
            readme_file.write(readme_md )
        out = ReadMe.from_readme(path , example_yaml_structure ).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
"readme_md, expected_error", [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
], )
def test_readme_from_readme_validation_fails(readme_md , expected_error ):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / "README.md"
        with open(path , "w+" ) as readme_file:
            readme_file.write(readme_md )
        expected_error = expected_error.format(path=path )
        with pytest.raises(ValueError , match=re.escape(expected_error ) ):
            readme = ReadMe.from_readme(path , example_yaml_structure )
            readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error", [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
], )
def test_readme_from_readme_parsing_errors_fails(readme_md , expected_error ):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / "README.md"
        with open(path , "w+" ) as readme_file:
            readme_file.write(readme_md )
        expected_error = expected_error.format(path=path )
        with pytest.raises(ValueError , match=re.escape(expected_error ) ):
            ReadMe.from_readme(path , example_yaml_structure )
@pytest.mark.parametrize(
"readme_md,", [
(README_MULTIPLE_SAME_HEADING_1),
], )
def test_readme_from_readme_suppress_parsing_errors(readme_md ):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / "README.md"
        with open(path , "w+" ) as readme_file:
            readme_file.write(readme_md )
        ReadMe.from_readme(path , example_yaml_structure , suppress_parsing_errors=True )
| 145
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__ )


class Speech2TextFeatureExtractor(SequenceFeatureExtractor ):
    """Extracts log-mel filter bank features and applies utterance-level CMVN."""

    model_input_names = ["input_features", "attention_mask"]
    def __init__(self , feature_size=80 , sampling_rate=16000 , num_mel_bins=80 , padding_value=0.0 , do_ceptral_normalize=True , normalize_means=True , normalize_vars=True , **kwargs ):
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True
    def _extract_fbank_features(self , waveform: np.ndarray ) -> np.ndarray:
        """Compute Kaldi-style log-mel filter bank features from a raw waveform."""
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform ).unsqueeze(0 )
        features = ta_kaldi.fbank(waveform , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
        return features.numpy()
    @staticmethod
    def utterance_cmvn(x: np.ndarray , input_length: int , normalize_means: Optional[bool] = True , normalize_vars: Optional[bool] = True , padding_value: float = 0.0 , ) -> np.ndarray:
        # subtract the mean and divide by the standard deviation of the real (unpadded) frames
        if normalize_means:
            mean = x[:input_length].mean(axis=0 )
            x = np.subtract(x , mean )
        if normalize_vars:
            std = x[:input_length].std(axis=0 )
            x = np.divide(x , std )

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32 )
        return x
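    # Worked example for utterance_cmvn (added for illustration, not part of the original
    # file): for x = [[1., 2.], [3., 4.]] with input_length = 2, the per-feature means are
    # [2., 3.], so mean normalization yields [[-1., -1.], [1., 1.]]; the stds of the
    # normalized values are then [1., 1.], leaving variance normalization a no-op here.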
    def normalize(self , input_features: List[np.ndarray] , attention_mask: Optional[np.ndarray] = None ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x , n , self.normalize_means , self.normalize_vars , self.padding_value )
            for x, n in zip(input_features , lengths )
        ]
    def __call__(self , raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , padding: Union[bool, str, PaddingStrategy] = False , max_length: Optional[int] = None , truncation: bool = False , pad_to_multiple_of: Optional[int] = None , return_tensors: Optional[Union[str, TensorType]] = None , sampling_rate: Optional[int] = None , return_attention_mask: Optional[bool] = None , **kwargs , ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )

        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
_UpperCAmelCase : List[Any] = is_batched_numpy or (
isinstance(lowerCAmelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_UpperCAmelCase : Any = [np.asarray(lowerCAmelCase__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase__ , np.ndarray ):
_UpperCAmelCase : Dict = np.asarray(lowerCAmelCase__ , dtype=np.floataa )
elif isinstance(lowerCAmelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_UpperCAmelCase : Any = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_UpperCAmelCase : List[Any] = [raw_speech]
# extract fbank features
_UpperCAmelCase : Tuple = [self._extract_fbank_features(lowerCAmelCase__ ) for waveform in raw_speech]
# convert into correct format for padding
_UpperCAmelCase : Optional[Any] = BatchFeature({"input_features": features} )
_UpperCAmelCase : Optional[Any] = self.pad(
lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , **lowerCAmelCase__ , )
# make sure list is in array format
_UpperCAmelCase : Optional[Any] = padded_inputs.get("input_features" )
if isinstance(input_features[0] , lowerCAmelCase__ ):
_UpperCAmelCase : int = [np.asarray(lowerCAmelCase__ , dtype=np.floataa ) for feature in input_features]
_UpperCAmelCase : Optional[int] = padded_inputs.get("attention_mask" )
if attention_mask is not None:
_UpperCAmelCase : Dict = [np.asarray(lowerCAmelCase__ , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
_UpperCAmelCase : List[str] = (
np.array(lowerCAmelCase__ , dtype=np.intaa )
if self._get_padding_strategies(lowerCAmelCase__ , max_length=lowerCAmelCase__ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
_UpperCAmelCase : str = self.normalize(
padded_inputs["input_features"] , attention_mask=lowerCAmelCase__ )
if return_tensors is not None:
_UpperCAmelCase : Any = padded_inputs.convert_to_tensors(lowerCAmelCase__ )
return padded_inputs
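# --- Added usage sketch (not part of the original module) ---
# Assuming the class is importable from transformers and the input is a 16 kHz
# mono float32 waveform; the random waveform and printed shape are illustrative.
#
#   import numpy as np
#   from transformers import Speech2TextFeatureExtractor
#
#   extractor = Speech2TextFeatureExtractor()
#   speech = np.random.randn(16000).astype(np.float32)  # hypothetical 1 s of audio
#   inputs = extractor(speech, sampling_rate=16000, padding=True, return_tensors="np")
#   print(inputs["input_features"].shape)  # (batch=1, num_frames, num_mel_bins=80)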
| 145
| 1
|
"""simple docstring"""
import numpy as np
from transformers import Pipeline
def lowerCamelCase (a_ :Any) -> Optional[int]:
lowercase :int = np.max(a_ , axis=-1 , keepdims=a_)
lowercase :Any = np.exp(outputs - maxes)
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=a_)
class __magic_name__ ( __UpperCAmelCase ):
def __snake_case ( self : Optional[int] , **snake_case__ : int ):
'''simple docstring'''
lowercase :int = {}
if "second_text" in kwargs:
lowercase :List[Any] = kwargs['''second_text''']
return preprocess_kwargs, {}, {}
def __snake_case ( self : List[str] , snake_case__ : int , snake_case__ : Dict=None ):
'''simple docstring'''
return self.tokenizer(snake_case__ , text_pair=snake_case__ , return_tensors=self.framework )
def __snake_case ( self : Tuple , snake_case__ : Tuple ):
'''simple docstring'''
return self.model(**snake_case__ )
def __snake_case ( self : int , snake_case__ : List[str] ):
'''simple docstring'''
lowercase :int = model_outputs.logits[0].numpy()
lowercase :Tuple = softmax(snake_case__ )
lowercase :str = np.argmax(snake_case__ )
lowercase :List[Any] = self.model.config.idalabel[best_class]
lowercase :Dict = probabilities[best_class].item()
lowercase :List[str] = logits.tolist()
return {"label": label, "score": score, "logits": logits}
| 172
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''openai/imagegpt-small''': '''''',
'''openai/imagegpt-medium''': '''''',
'''openai/imagegpt-large''': '''''',
}
class __magic_name__ ( __UpperCAmelCase ):
__A : str = "imagegpt"
__A : str = ["past_key_values"]
__A : Optional[Any] = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Optional[Any] , snake_case__ : Union[str, Any]=5_1_2 + 1 , snake_case__ : Optional[int]=3_2 * 3_2 , snake_case__ : Optional[Any]=5_1_2 , snake_case__ : List[str]=2_4 , snake_case__ : Any=8 , snake_case__ : str=None , snake_case__ : Any="quick_gelu" , snake_case__ : Optional[int]=0.1 , snake_case__ : Optional[Any]=0.1 , snake_case__ : Tuple=0.1 , snake_case__ : Tuple=1e-5 , snake_case__ : List[Any]=0.02 , snake_case__ : Tuple=True , snake_case__ : Dict=True , snake_case__ : str=False , snake_case__ : Optional[int]=False , snake_case__ : Union[str, Any]=False , **snake_case__ : Union[str, Any] , ):
'''simple docstring'''
lowercase :int = vocab_size
lowercase :str = n_positions
lowercase :List[str] = n_embd
lowercase :int = n_layer
lowercase :List[str] = n_head
lowercase :Tuple = n_inner
lowercase :Tuple = activation_function
lowercase :Optional[Any] = resid_pdrop
lowercase :Tuple = embd_pdrop
lowercase :Dict = attn_pdrop
lowercase :List[Any] = layer_norm_epsilon
lowercase :List[Any] = initializer_range
lowercase :List[Any] = scale_attn_weights
lowercase :Dict = use_cache
lowercase :List[str] = scale_attn_by_inverse_layer_idx
lowercase :List[str] = reorder_and_upcast_attn
lowercase :Dict = tie_word_embeddings
super().__init__(tie_word_embeddings=snake_case__ , **snake_case__ )
class __magic_name__ ( __UpperCAmelCase ):
@property
def __snake_case ( self : Any ):
'''simple docstring'''
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
] )
def __snake_case ( self : Union[str, Any] , snake_case__ : "FeatureExtractionMixin" , snake_case__ : int = 1 , snake_case__ : int = -1 , snake_case__ : bool = False , snake_case__ : Optional["TensorType"] = None , snake_case__ : int = 3 , snake_case__ : int = 3_2 , snake_case__ : int = 3_2 , ):
'''simple docstring'''
lowercase :Union[str, Any] = self._generate_dummy_images(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
lowercase :List[str] = dict(preprocessor(images=snake_case__ , return_tensors=snake_case__ ) )
return inputs
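# Added usage sketch (assumes the classes are imported from transformers):
#   from transformers import ImageGPTConfig, ImageGPTModel
#   config = ImageGPTConfig()      # imagegpt-small-like defaults defined above
#   config.hidden_size             # -> 512, resolved to n_embd via attribute_map
#   model = ImageGPTModel(config)  # randomly initialized model from this config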
| 172
| 1
|
def kinetic_energy(mass: float, velocity: float) -> float:
    """
    Calculate the kinetic energy 1/2 * m * v^2 of a non-rotating body.

    >>> kinetic_energy(10, 10)
    500.0
    >>> kinetic_energy(20, -20)
    4000.0
    """
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
| 15
|
from __future__ import annotations


class BoyerMooreSearch:
    """Boyer-Moore string search using only the bad-character heuristic."""

    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """
        Return the text index of the rightmost mismatch when the pattern is
        aligned at `current_pos`, or -1 if the pattern matches there.
        """
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                # Note: rebinding the loop variable does not skip alignments in a
                # Python for-loop, so this "shift" is informational only and the
                # scan effectively checks every alignment.
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
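# Added sanity check (sketch, not in the original): a second search on a classic
# example text; "AABA" occurs in "AABAACAADAABAABA" at indices 0, 9 and 12.
bms2 = BoyerMooreSearch("AABAACAADAABAABA", "AABA")
print(bms2.bad_character_heuristic())  # expected: [0, 9, 12]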
| 276
| 0
|
from collections import UserDict
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
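# Added usage sketch (checkpoint name assumed; scores below are illustrative):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("path/to/cat.png", candidate_labels=["a photo of a cat", "a photo of a dog"])
#   -> [{"score": 0.99, "label": "a photo of a cat"}, {"score": 0.01, "label": "a photo of a dog"}]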
| 353
|
import unittest

from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM


@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
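# Added note (sketch): inside a transformers checkout these tests are normally run
# with pytest; the exact file path varies by library version, e.g.
#   python -m pytest tests/models/trocr/test_modeling_trocr.py -k "decoder_model_past" -v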
| 10
| 0
|
"""
One of the several implementations of the Lempel-Ziv-Welch compression algorithm
https://en.wikipedia.org/wiki/Lempel%E2%80%93Ziv%E2%80%93Welch
"""

import math
import os
import sys


def read_file_binary(file_path: str) -> str:
    """Read the given file as bytes and return them as one long bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """Add the new strings (curr_string + "0", curr_string + "1") to the lexicon."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """Compress the given bit string and return the compressed result."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """Prepend the source file's length, in a variable-length binary encoding, to the compressed string."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the given bit string (only 0's and 1's) as bytes to the file."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (byte_length - len(result_byte_array[-1]) - 1)

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    """Read the source file, compress it and write the result to the destination file."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
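# Added usage sketch: the script compresses a source file into a destination file
# from the command line (the script name here is illustrative):
#   python lempel_ziv.py input.txt compressed.bin
# A matching decompressor (not shown) would rebuild the same incrementally-grown
# lexicon to reverse the bit stream.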
| 56
|
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]


if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
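# Added note (sketch): with this lazy-module pattern, submodules are imported only
# on first attribute access, so importing the config does not pull in torch, e.g.
#   from transformers import LlamaConfig        # cheap: only the config module loads
#   from transformers import LlamaForCausalLM   # first access triggers the torch-backed import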
| 56
| 1
|
"""simple docstring"""
import math
def lowercase__ ( lowercase_ ,lowercase_ ) -> Optional[int]:
"""simple docstring"""
if (
not isinstance(_a ,(int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("power_factor must be a valid float value between -1 and 1." )
return apparent_power * power_factor
def lowercase__ ( lowercase_ ,lowercase_ ) -> int:
"""simple docstring"""
if (
not isinstance(_a ,(int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("power_factor must be a valid float value between -1 and 1." )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 364
|
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = GPTaTokenizer
SCREAMING_SNAKE_CASE__ :Tuple = GPTaTokenizerFast
SCREAMING_SNAKE_CASE__ :Dict = True
SCREAMING_SNAKE_CASE__ :int = {"add_prefix_space": True}
SCREAMING_SNAKE_CASE__ :Optional[Any] = False
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCamelCase : List[str] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
_UpperCamelCase : Tuple = dict(zip(__a , range(len(__a ) ) ) )
_UpperCamelCase : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_UpperCamelCase : str = {"unk_token": "<unk>"}
_UpperCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__a ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__a ) )
def __SCREAMING_SNAKE_CASE ( self : Any , **__a : Optional[int] ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , **__a : Union[str, Any] ) -> int:
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Any ) -> Tuple:
_UpperCamelCase : List[Any] = "lower newer"
_UpperCamelCase : Union[str, Any] = "lower newer"
return input_text, output_text
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
_UpperCamelCase : Dict = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_UpperCamelCase : Optional[Any] = "lower newer"
_UpperCamelCase : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
_UpperCamelCase : Any = tokenizer.tokenize(__a , add_prefix_space=__a )
self.assertListEqual(__a , __a )
_UpperCamelCase : str = tokens + [tokenizer.unk_token]
_UpperCamelCase : str = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
if not self.test_rust_tokenizer:
return
_UpperCamelCase : Any = self.get_tokenizer()
_UpperCamelCase : List[str] = self.get_rust_tokenizer(add_prefix_space=__a )
_UpperCamelCase : Optional[Any] = "lower newer"
# Testing tokenization
_UpperCamelCase : str = tokenizer.tokenize(__a , add_prefix_space=__a )
_UpperCamelCase : List[str] = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids without special tokens
_UpperCamelCase : List[str] = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
_UpperCamelCase : Optional[Any] = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids with special tokens
_UpperCamelCase : Optional[int] = self.get_rust_tokenizer(add_prefix_space=__a )
_UpperCamelCase : List[Any] = tokenizer.encode(__a , add_prefix_space=__a )
_UpperCamelCase : List[str] = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
# Testing the unknown token
_UpperCamelCase : Optional[int] = tokens + [rust_tokenizer.unk_token]
_UpperCamelCase : int = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__a ) , __a )
def __SCREAMING_SNAKE_CASE ( self : int , *__a : int , **__a : List[Any] ) -> Union[str, Any]:
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : int=15 ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : str = self.rust_tokenizer_class.from_pretrained(__a , **__a )
# Simple input
_UpperCamelCase : Optional[int] = "This is a simple input"
_UpperCamelCase : List[str] = ["This is a simple input 1", "This is a simple input 2"]
_UpperCamelCase : Dict = ("This is a simple input", "This is a pair")
_UpperCamelCase : Any = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" )
# Simple input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" )
# Simple input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , )
# Pair input
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" )
# Pair input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" )
# Pair input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
_UpperCamelCase : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
_UpperCamelCase : Union[str, Any] = "This is a simple input"
_UpperCamelCase : Optional[Any] = ["This is a simple input looooooooong", "This is a simple input"]
_UpperCamelCase : str = ("This is a simple input", "This is a pair")
_UpperCamelCase : List[str] = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
_UpperCamelCase : Union[str, Any] = tokenizer.pad_token_id
_UpperCamelCase : str = tokenizer(__a , padding="max_length" , max_length=30 , return_tensors="np" )
_UpperCamelCase : Tuple = tokenizer(__a , padding=__a , truncate=__a , return_tensors="np" )
_UpperCamelCase : str = tokenizer(*__a , padding="max_length" , max_length=60 , return_tensors="np" )
_UpperCamelCase : Optional[int] = tokenizer(__a , padding=__a , truncate=__a , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
_UpperCamelCase : Any = "$$$"
_UpperCamelCase : Any = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=__a , add_bos_token=__a )
_UpperCamelCase : int = "This is a simple input"
_UpperCamelCase : Tuple = ["This is a simple input 1", "This is a simple input 2"]
_UpperCamelCase : Union[str, Any] = tokenizer.bos_token_id
_UpperCamelCase : str = tokenizer(__a )
_UpperCamelCase : Optional[Any] = tokenizer(__a )
self.assertEqual(out_s.input_ids[0] , __a )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_UpperCamelCase : Optional[Any] = tokenizer.decode(out_s.input_ids )
_UpperCamelCase : int = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __a )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
pass
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
# TODO: change to self.get_tokenizers() when the fast version is implemented
_UpperCamelCase : Optional[Any] = [self.get_tokenizer(do_lower_case=__a , add_bos_token=__a )]
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_UpperCamelCase : Tuple = "Encode this."
_UpperCamelCase : List[str] = "This one too please."
_UpperCamelCase : Optional[int] = tokenizer.encode(__a , add_special_tokens=__a )
encoded_sequence += tokenizer.encode(__a , add_special_tokens=__a )
_UpperCamelCase : int = tokenizer.encode_plus(
__a , __a , add_special_tokens=__a , return_special_tokens_mask=__a , )
_UpperCamelCase : str = encoded_sequence_dict["input_ids"]
_UpperCamelCase : Optional[int] = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(__a ) , len(__a ) )
_UpperCamelCase : Union[str, Any] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(__a )
]
_UpperCamelCase : Union[str, Any] = [x for x in filtered_sequence if x is not None]
self.assertEqual(__a , __a )
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
_UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__a )
_UpperCamelCase : List[Any] = "A photo of a cat"
_UpperCamelCase : Any = tokenizer.encode(
__a , )
self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained("test_opt" )
_UpperCamelCase : str = AutoTokenizer.from_pretrained("./test_opt" )
_UpperCamelCase : Optional[Any] = tokenizer.encode(
__a , )
self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
_UpperCamelCase : int = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=__a )
_UpperCamelCase : List[Any] = "A photo of a cat"
_UpperCamelCase : Union[str, Any] = tokenizer.encode(
__a , )
# Same as above
self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
_UpperCamelCase : Dict = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__a )
_UpperCamelCase : List[str] = "bos"
_UpperCamelCase : Tuple = tokenizer.get_vocab()["bos"]
_UpperCamelCase : List[Any] = "A photo of a cat"
_UpperCamelCase : List[Any] = tokenizer.encode(
__a , )
# We changed the bos token
self.assertEqual(__a , [3_1957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained("./tok" )
_UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
_UpperCamelCase : Tuple = tokenizer.encode(
__a , )
self.assertEqual(__a , [3_1957, 250, 1345, 9, 10, 4758] )
| 310
| 0
|