| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (81–54k chars) | int64 (0–721) | string (91–41.9k chars) | int64 (0–699) | int64 (0–1) |
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_model_as_decoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids,
            labels=token_labels, next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids,
            start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape
        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
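        # Worked example with this tester's defaults (an illustrative note, not from the original file):
        # image_size=64 and a backbone output stride of 32 give a 2x2 feature map,
        # so num_patches = (64 // 32) ** 2 = 4 and seq_length = 4 + 1 = 5 once the [CLS] token is counted.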
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False,
            initializer_range=self.initializer_range, backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()
        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
def remove_digit(num: int) -> int:
    """Return the biggest number obtainable by deleting exactly one digit of abs(num)."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)
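# A quick sanity check of remove_digit (illustrative values, not from the original file):
# for 152 the one-digit deletions give 52, 12 and 15, so remove_digit(152) == 52.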
if __name__ == "__main__":
__import__('''doctest''').testmod()
def is_even(number: int) -> bool:
    """Return True when `number` is even, by testing its lowest bit."""
    return number & 1 == 0
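# Illustrative checks (hypothetical, added for clarity): is_even(4) is True and is_even(7) is False,
# because only odd numbers have their least-significant bit set.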
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_opt"] = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_opt"] = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors
        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]):
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int):
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> "Graph":
        subgraph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    script_dir = os.path.abspath(os.path.dirname(__file__))
    network_file = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    with open(network_file) as f:
        data = f.read().strip().split("\n")
    adjacency_matrix = [line.split(",") for line in data]
    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])
    graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph = graph.prims_algorithm()
    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())
    return initial_total - optimal_total
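# A small sanity check of prims_algorithm (an illustrative triangle graph, not the Project Euler data):
# g = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
# sum(g.prims_algorithm().edges.values()) == 3  # the MST keeps the weight-1 and weight-2 edges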
if __name__ == "__main__":
print(f'{solution() = }')
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
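# For example (hypothetical arrays, added for clarity): with preds = np.array([1, 0, 1]) and
# labels = np.array([1, 1, 1]), simple_accuracy returns 2/3 because two of three predictions match.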
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def A ( ) -> Any:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , __UpperCamelCase )
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config, cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name,
            max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name,
            max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset,
        compute_metrics=compute_metrics, data_collator=data_collator,
    )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)
    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version("torch"))
def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    return compare_versions(torch_version, operation, version)
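# Example usage (an illustrative sketch, assuming torch and packaging are installed):
# compare_versions("torch", ">=", "1.12") resolves the installed torch version and compares it,
# while is_torch_version("<", "2.0") reuses the module-level torch_version parsed above.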
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_mra"] = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths,
            num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False,
            initializer_range=self.initializer_range, out_features=self.out_features,
            out_indices=self.out_indices, num_labels=self.num_labels,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
SCREAMING_SNAKE_CASE__ = f'https://www.google.com/search?q={query}&num=100'
SCREAMING_SNAKE_CASE__ = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
SCREAMING_SNAKE_CASE__ = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
SCREAMING_SNAKE_CASE__ = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"
class TestMbartCc25Enro(TestCasePlus):
"""simple docstring"""
def _a ( self : Dict ):
"""simple docstring"""
super().setUp()
A__ = cached_path(
'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' , extract_compressed_file=_snake_case , )
A__ = F'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'''
@slow
@require_torch_gpu
def _a ( self : Dict ):
"""simple docstring"""
MarianMTModel.from_pretrained(_snake_case )
@slow
@require_torch_gpu
def _a ( self : str ):
"""simple docstring"""
A__ = {
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
A__ = (self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
A__ = bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
for k, v in env_vars_to_replace.items():
A__ = bash_script.replace(_snake_case , str(_snake_case ) )
A__ = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
A__ = F'''
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
'''.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
A__ = ['finetune.py'] + bash_script.split() + args
with patch.object(_snake_case , 'argv' , _snake_case ):
A__ = argparse.ArgumentParser()
A__ = pl.Trainer.add_argparse_args(_snake_case )
A__ = SummarizationModule.add_model_specific_args(_snake_case , os.getcwd() )
A__ = parser.parse_args()
A__ = main(_snake_case )
# Check metrics
A__ = load_json(model.metrics_save_path )
A__ = metrics['val'][0]
A__ = metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , _snake_case )
self.assertGreater(last_step_stats['val_avg_gen_time'] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['val_avg_gen_time'] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
A__ = os.listdir(_snake_case )
A__ = [x for x in contents if x.endswith('.ckpt' )][0]
A__ = os.path.join(args.output_dir , _snake_case )
A__ = torch.load(_snake_case , map_location='cpu' )
A__ = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
A__ = {os.path.basename(_snake_case ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
"""simple docstring"""
@timeout_decorator.timeout(600)
@slow
@require_torch_gpu
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = F'''{self.test_file_dir_str}/test_data/wmt_en_ro'''
A__ = {
'--fp16_opt_level=O1': '',
'$MAX_LEN': 128,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
A__ = (
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
)
A__ = bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
A__ = bash_script.replace('--fp16 ' , ' ' )
for k, v in env_vars_to_replace.items():
A__ = bash_script.replace(_snake_case , str(_snake_case ) )
A__ = self.get_auto_remove_tmp_dir()
A__ = bash_script.replace('--fp16' , '' )
A__ = 6
A__ = (
['distillation.py']
+ bash_script.split()
+ [
F'''--output_dir={output_dir}''',
'--gpus=1',
'--learning_rate=1e-3',
F'''--num_train_epochs={epochs}''',
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(_snake_case , 'argv' , _snake_case ):
A__ = argparse.ArgumentParser()
A__ = pl.Trainer.add_argparse_args(_snake_case )
A__ = SummarizationDistiller.add_model_specific_args(_snake_case , os.getcwd() )
A__ = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
A__ = distill_main(_snake_case )
# Check metrics
A__ = load_json(model.metrics_save_path )
A__ = metrics['val'][0]
A__ = metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , _snake_case )
# check lightning ckpt can be loaded and has a reasonable statedict
A__ = os.listdir(_snake_case )
A__ = [x for x in contents if x.endswith('.ckpt' )][0]
A__ = os.path.join(args.output_dir , _snake_case )
A__ = torch.load(_snake_case , map_location='cpu' )
A__ = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
A__ = {os.path.basename(_snake_case ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
def _a ( self : Any ):
"""simple docstring"""
return self._get_dummy_components()
def _a ( self : Optional[int] , _snake_case : Any , _snake_case : str=0 ):
"""simple docstring"""
if str(_snake_case ).startswith('mps' ):
A__ = torch.manual_seed(_snake_case )
else:
A__ = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
A__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case )
A__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case )
A__ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _a ( self : Dict ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def _a ( self : int ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def _a ( self : Optional[int] ):
"""simple docstring"""
        super().test_save_load_float16(expected_max_diff=1E-1 )
def _a ( self : List[str] ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def _a ( self : Dict ):
"""simple docstring"""
self._test_save_load_local()
def _a ( self : Optional[int] ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 52
| 1
|
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False ) -> int:
if isinstance(__UpperCamelCase , __UpperCamelCase ) and isinstance(__UpperCamelCase , __UpperCamelCase ):
A__ = len(set_a.intersection(__UpperCamelCase ) )
if alternative_union:
A__ = len(__UpperCamelCase ) + len(__UpperCamelCase )
else:
A__ = len(set_a.union(__UpperCamelCase ) )
return intersection / union
if isinstance(__UpperCamelCase , (list, tuple) ) and isinstance(__UpperCamelCase , (list, tuple) ):
A__ = [element for element in set_a if element in set_b]
if alternative_union:
A__ = len(__UpperCamelCase ) + len(__UpperCamelCase )
return len(__UpperCamelCase ) / union
else:
A__ = set_a + [element for element in set_b if element not in set_a]
return len(__UpperCamelCase ) / len(__UpperCamelCase )
return None
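# Worked example (illustrative): for set_a = {'a', 'b', 'c', 'd', 'e'} and
# set_b = {'c', 'd', 'e', 'f', 'h', 'i'}, the intersection has 3 elements and the
# union has 8, so the default set path returns 3 / 8 = 0.375.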
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = {'''a''', '''b''', '''c''', '''d''', '''e'''}
SCREAMING_SNAKE_CASE__ = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
print(jaccard_similarity(set_a, set_b))
| 52
|
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
SCREAMING_SNAKE_CASE__ = get_logger(__name__)
SCREAMING_SNAKE_CASE__ = r'''
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary token when not
            using beam search, or log softmax over the vocabulary when using beam search.
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
'''
class __lowerCAmelCase :
"""simple docstring"""
@add_start_docstrings(_snake_case )
def __call__( self : Optional[int] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray ):
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class __lowerCAmelCase :
"""simple docstring"""
@add_start_docstrings(_snake_case )
def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray ):
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
@add_start_docstrings(_snake_case )
def __call__( self : Any , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int , **_snake_case : Optional[int] ):
"""simple docstring"""
for processor in self:
A__ = inspect.signature(processor.__call__ ).parameters
if len(_snake_case ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
F'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
F'''{processor.__class__} are passed to the logits processor.''' )
A__ = processor(_snake_case , _snake_case , _snake_case , **_snake_case )
else:
A__ = processor(_snake_case , _snake_case , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Any , _snake_case : float ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or not (temperature > 0):
raise ValueError(F'''`temperature` has to be a strictly positive float, but is {temperature}''' )
A__ = temperature
def __call__( self : str , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = scores / self.temperature
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Optional[Any] , _snake_case : float , _snake_case : float = -float('Inf' ) , _snake_case : int = 1 ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or (top_p < 0 or top_p > 1.0):
raise ValueError(F'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
if not isinstance(_snake_case , _snake_case ) or (min_tokens_to_keep < 1):
raise ValueError(F'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
A__ = top_p
A__ = filter_value
A__ = min_tokens_to_keep
def __call__( self : str , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ , A__ = lax.top_k(_snake_case , scores.shape[-1] )
A__ = jnp.full_like(_snake_case , self.filter_value )
A__ = jax.nn.softmax(_snake_case , axis=-1 ).cumsum(axis=-1 )
A__ = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
A__ = jnp.roll(_snake_case , 1 )
score_mask |= score_mask.at[:, 0].set(_snake_case )
# min tokens to keep
A__ = score_mask.at[:, : self.min_tokens_to_keep].set(_snake_case )
A__ = jnp.where(_snake_case , _snake_case , _snake_case )
A__ = jax.lax.sort_key_val(_snake_case , _snake_case )[-1]
return next_scores
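# Illustrative walkthrough of the mask above (hypothetical values): for sorted
# probabilities [0.5, 0.3, 0.15, 0.05] and top_p = 0.8, cumsum = [0.5, 0.8, 0.95, 1.0]
# and `cumulative_probs < top_p` keeps only index 0; the `jnp.roll` plus `.at[:, 0]`
# trick then re-includes index 1, the token that crosses the threshold, so the kept
# probability mass is exactly 0.8.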
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , _snake_case : int , _snake_case : float = -float('Inf' ) , _snake_case : int = 1 ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or top_k <= 0:
raise ValueError(F'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
A__ = max(_snake_case , _snake_case )
A__ = filter_value
def __call__( self : Optional[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ , A__ = scores.shape
A__ = jnp.full(batch_size * vocab_size , self.filter_value )
A__ = min(self.top_k , scores.shape[-1] ) # Safety check
A__ , A__ = lax.top_k(_snake_case , _snake_case )
A__ = jnp.broadcast_to((jnp.arange(_snake_case ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
A__ = topk_scores.flatten()
A__ = topk_indices.flatten() + shift
A__ = next_scores_flat.at[topk_indices_flat].set(_snake_case )
A__ = next_scores_flat.reshape(_snake_case , _snake_case )
return next_scores
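# Indexing note (illustrative): `shift` equals row_index * vocab_size broadcast over
# the top-k columns, so adding it to the per-row top-k indices yields positions in the
# flattened (batch_size * vocab_size) array; a single scatter (`.at[...].set`) then
# restores the kept scores for every row at once before reshaping back.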
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Any , _snake_case : int ):
"""simple docstring"""
A__ = bos_token_id
def __call__( self : Optional[int] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = jnp.full(scores.shape , -float('inf' ) )
A__ = 1 - jnp.bool_(cur_len - 1 )
A__ = jnp.where(_snake_case , new_scores.at[:, self.bos_token_id].set(0 ) , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Any , _snake_case : int , _snake_case : int ):
"""simple docstring"""
A__ = max_length
A__ = eos_token_id
def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = jnp.full(scores.shape , -float('inf' ) )
A__ = 1 - jnp.bool_(cur_len - self.max_length + 1 )
A__ = jnp.where(_snake_case , new_scores.at[:, self.eos_token_id].set(0 ) , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Dict , _snake_case : int , _snake_case : int ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or min_length < 0:
raise ValueError(F'''`min_length` has to be a positive integer, but is {min_length}''' )
if not isinstance(_snake_case , _snake_case ) or eos_token_id < 0:
raise ValueError(F'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
A__ = min_length
A__ = eos_token_id
def __call__( self : int , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
A__ = jnp.where(_snake_case , scores.at[:, self.eos_token_id].set(-float('inf' ) ) , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : int , _snake_case : Tuple , _snake_case : Union[str, Any] ):
"""simple docstring"""
A__ = list(_snake_case )
A__ = begin_index
def __call__( self : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : str , _snake_case : int ):
"""simple docstring"""
A__ = 1 - jnp.bool_(cur_len - self.begin_index )
A__ = jnp.where(_snake_case , scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : int , _snake_case : list ):
"""simple docstring"""
A__ = list(_snake_case )
def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = scores.at[..., self.suppress_tokens].set(-float('inf' ) )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : List[str] , _snake_case : Optional[Any] ):
"""simple docstring"""
A__ = dict(_snake_case )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
        A__ = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.int32 ) * -1
for index, token in force_token_map.items():
if token is not None:
A__ = force_token_array.at[index].set(_snake_case )
        A__ = jnp.int32(_snake_case )
def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
def _force_token(_snake_case : Dict ):
A__ = scores.shape[0]
A__ = self.force_token_array[generation_idx]
A__ = jnp.ones_like(_snake_case , dtype=scores.dtype ) * -float('inf' )
A__ = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
A__ = lax.dynamic_update_slice(_snake_case , _snake_case , (0, current_token) )
return new_scores
A__ = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(_snake_case ) , lambda: scores , ) , )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : List[Any] ):
"""simple docstring"""
A__ = generate_config.eos_token_id
A__ = generate_config.no_timestamps_token_id
A__ = generate_config.no_timestamps_token_id + 1
A__ = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(_snake_case , 'max_initial_timestamp_index' ):
A__ = generate_config.max_initial_timestamp_index
else:
A__ = model_config.vocab_size
if self.max_initial_timestamp_index is None:
A__ = model_config.vocab_size
def __call__( self : Tuple , _snake_case : List[Any] , _snake_case : Dict , _snake_case : Dict ):
"""simple docstring"""
A__ = scores.at[:, self.no_timestamps_token_id].set(-float('inf' ) )
def handle_pairs(_snake_case : Dict , _snake_case : str ):
A__ = jnp.where((cur_len - self.begin_index) >= 1 , _snake_case , _snake_case )
A__ = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , _snake_case , )
A__ = jnp.where((cur_len - self.begin_index) < 2 , _snake_case , _snake_case )
A__ = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , _snake_case , _snake_case , )
return jnp.where(
_snake_case , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('inf' ) ) , scores_k.at[: self.eos_token_id].set(-float('inf' ) ) , ) , _snake_case , )
A__ = jax.vmap(_snake_case )(_snake_case , _snake_case )
A__ = jnp.where(cur_len == self.begin_index , _snake_case , _snake_case )
A__ = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , _snake_case , )
A__ = self.timestamp_begin + self.max_initial_timestamp_index
A__ = jnp.where(
_snake_case , scores.at[:, last_allowed + 1 :].set(-float('inf' ) ) , _snake_case , )
# if sum of probability over timestamps is above any other token, sample timestamp
A__ = jax.nn.log_softmax(_snake_case , axis=-1 )
def handle_cumulative_probs(_snake_case : List[Any] , _snake_case : Union[str, Any] ):
A__ = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
A__ = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('inf' ) ) , _snake_case , )
A__ = jax.vmap(_snake_case )(_snake_case , _snake_case )
return scores
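# Minimal usage sketch (hypothetical wiring; the public names are assumed from the
# upstream library, where this file's classes are exposed as Flax logits processors):
#   processors = FlaxLogitsProcessorList([FlaxTemperatureLogitsWarper(0.7)])
#   scores = processors(input_ids, scores, cur_len)
# The list is built once per generation call and applied at every decoding step.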
| 52
| 1
|
def A ( __UpperCamelCase ) -> str:
A__ = 0
A__ = len(__UpperCamelCase )
for i in range(n - 1 ):
for j in range(i + 1 , __UpperCamelCase ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
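# Quick illustrative check: [3, 1, 2] contains exactly the inversions (3, 1) and
# (3, 2), so the brute-force count above returns 2 for that input.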
def A ( __UpperCamelCase ) -> Dict:
if len(__UpperCamelCase ) <= 1:
return arr, 0
A__ = len(__UpperCamelCase ) // 2
A__ = arr[0:mid]
A__ = arr[mid:]
A__ , A__ = count_inversions_recursive(__UpperCamelCase )
A__ , A__ = count_inversions_recursive(__UpperCamelCase )
A__ , A__ = _count_cross_inversions(__UpperCamelCase , __UpperCamelCase )
A__ = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def A ( __UpperCamelCase , __UpperCamelCase ) -> Optional[int]:
A__ = []
A__ = A__ = A__ = 0
while i < len(__UpperCamelCase ) and j < len(__UpperCamelCase ):
if p[i] > q[j]:
            # if p[i] > q[j], then every remaining element of p also exceeds q[j]
            # (p is sorted), so each of those pairs is an inversion.
num_inversion += len(__UpperCamelCase ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(__UpperCamelCase ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
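# Invariant note (illustrative): both halves arrive sorted from the recursion, so a
# single comparison p[i] > q[j] certifies len(p) - i cross inversions at once; this
# batching is what keeps the recursive count at O(n log n) overall.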
def A ( ) -> List[str]:
A__ = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    A__ = count_inversions_bf(arr_a )
    A__ , A__ = count_inversions_recursive(arr_a )
    assert num_inversions_bf == num_inversions_recursive == 8
    print('number of inversions = ' , num_inversions_bf )
    # testing an array with zero inversions (a sorted arr_a)
    arr_a.sort()
    A__ = count_inversions_bf(arr_a )
    A__ , A__ = count_inversions_recursive(arr_a )
    assert num_inversions_bf == num_inversions_recursive == 0
    print('number of inversions = ' , num_inversions_bf )
    # an empty list should also have zero inversions
    A__ = []
    A__ = count_inversions_bf(arr_a )
    A__ , A__ = count_inversions_recursive(arr_a )
    assert num_inversions_bf == num_inversions_recursive == 0
    print('number of inversions = ' , num_inversions_bf )
if __name__ == "__main__":
main()
| 52
|
import argparse
import struct
import unittest
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[str] , _snake_case : bytes ):
"""simple docstring"""
A__ = data
# Initialize hash values
A__ = [
0x6A09E667,
0xBB67AE85,
0x3C6EF372,
0xA54FF53A,
0x510E527F,
0x9B05688C,
0x1F83D9AB,
0x5BE0CD19,
]
# Initialize round constants
A__ = [
0x428A2F98,
0x71374491,
0xB5C0FBCF,
0xE9B5DBA5,
0x3956C25B,
0x59F111F1,
0x923F82A4,
0xAB1C5ED5,
0xD807AA98,
0x12835B01,
0x243185BE,
0x550C7DC3,
0x72BE5D74,
0x80DEB1FE,
0x9BDC06A7,
0xC19BF174,
0xE49B69C1,
0xEFBE4786,
0x0FC19DC6,
0x240CA1CC,
0x2DE92C6F,
0x4A7484AA,
0x5CB0A9DC,
0x76F988DA,
0x983E5152,
0xA831C66D,
0xB00327C8,
0xBF597FC7,
0xC6E00BF3,
0xD5A79147,
0x06CA6351,
0x14292967,
0x27B70A85,
0x2E1B2138,
0x4D2C6DFC,
0x53380D13,
0x650A7354,
0x766A0ABB,
0x81C2C92E,
0x92722C85,
0xA2BFE8A1,
0xA81A664B,
0xC24B8B70,
0xC76C51A3,
0xD192E819,
0xD6990624,
0xF40E3585,
0x106AA070,
0x19A4C116,
0x1E376C08,
0x2748774C,
0x34B0BCB5,
0x391C0CB3,
0x4ED8AA4A,
0x5B9CCA4F,
0x682E6FF3,
0x748F82EE,
0x78A5636F,
0x84C87814,
0x8CC70208,
0x90BEFFFA,
0xA4506CEB,
0xBEF9A3F7,
0xC67178F2,
]
A__ = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def _a ( _snake_case : bytes ):
"""simple docstring"""
A__ = B'\x80' + (B'\x00' * (63 - (len(_snake_case ) + 8) % 64))
A__ = struct.pack('>Q' , (len(_snake_case ) * 8) )
return data + padding + big_endian_integer
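    # Padding recap (illustrative): append the 0x80 marker byte, then enough zero
    # bytes that len(data) + 1 + zeros + 8 is a multiple of 64, then the original
    # length in bits as a big-endian 64-bit integer -- standard SHA-256 preprocessing.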
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
A__ = list(struct.unpack('>16L' , _snake_case ) )
# add 48 0-ed integers
words += [0] * 48
A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
A__ = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
A__ = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
A__ = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x100000000
# Compression
A__ = self.ror(_snake_case , 6 ) ^ self.ror(_snake_case , 11 ) ^ self.ror(_snake_case , 25 )
A__ = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
A__ = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x100000000
A__ = self.ror(_snake_case , 2 ) ^ self.ror(_snake_case , 13 ) ^ self.ror(_snake_case , 22 )
A__ = (a & b) ^ (a & c) ^ (b & c)
A__ = (sa + maj) % 0x100000000
A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ = (
g,
f,
e,
((d + tempa) % 0x100000000),
c,
b,
a,
                ((tempa + tempa) % 0x100000000),  # temp1 + temp2 in standard SHA-256; both names collapsed to tempa here
)
A__ = [a, b, c, d, e, f, g, h]
# Modify final values
A__ = [
((element + mutated_hash_values[index]) % 0x100000000)
for index, element in enumerate(self.hashes )
]
        A__ = ''.join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
def _a ( self : Dict , _snake_case : int , _snake_case : int ):
"""simple docstring"""
return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
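# Illustrative check of the 32-bit right-rotate above (hypothetical call): rotating
# the value 1 right by one position wraps the low bit to the top,
# i.e. ror(1, 1) == 0x80000000.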
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : str ):
"""simple docstring"""
import hashlib
A__ = bytes('Test String' , 'utf-8' )
        self.assertEqual(SHAaaa(_snake_case ).hash , hashlib.sha256(_snake_case ).hexdigest() )
def A ( ) -> None:
import doctest
doctest.testmod()
A__ = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
A__ = parser.parse_args()
A__ = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
A__ = f.read()
else:
        A__ = bytes(args.input_string , 'utf-8' )
    print(SHAaaa(hash_input ).hash )
if __name__ == "__main__":
main()
| 52
| 1
|
from math import factorial
SCREAMING_SNAKE_CASE__ = {str(digit): factorial(digit) for digit in range(1_0)}
def A ( __UpperCamelCase ) -> int:
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise TypeError('Parameter number must be int' )
if number < 0:
raise ValueError('Parameter number must be greater than or equal to 0' )
# Converts number in string to iterate on its digits and adds its factorial.
return sum(DIGIT_FACTORIAL[digit] for digit in str(__UpperCamelCase ) )
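# Worked example (illustrative): 145 -> 1! + 4! + 5! = 1 + 24 + 120 = 145, so the
# digit factorial sum of 145 is 145 itself (a known factorion).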
def A ( __UpperCamelCase = 60 , __UpperCamelCase = 1_000_000 ) -> int:
if not isinstance(__UpperCamelCase , __UpperCamelCase ) or not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise TypeError('Parameters chain_length and number_limit must be int' )
if chain_length <= 0 or number_limit <= 0:
raise ValueError(
'Parameters chain_length and number_limit must be greater than 0' )
# the counter for the chains with the exact desired length
A__ = 0
# the cached sizes of the previous chains
A__ = {}
for start_chain_element in range(1 , __UpperCamelCase ):
# The temporary set will contain the elements of the chain
A__ = set()
A__ = 0
        # Stop computing the chain when you find a cached size, a repeating item, or
        # the length is greater than the desired one.
A__ = start_chain_element
while (
chain_element not in chain_sets_lengths
and chain_element not in chain_set
and chain_set_length <= chain_length
):
chain_set.add(__UpperCamelCase )
chain_set_length += 1
A__ = digit_factorial_sum(__UpperCamelCase )
if chain_element in chain_sets_lengths:
chain_set_length += chain_sets_lengths[chain_element]
A__ = chain_set_length
# If chain contains the exact amount of elements increase the counter
if chain_set_length == chain_length:
chains_counter += 1
return chains_counter
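# Known chain from the problem statement (illustrative): 69 -> 363600 -> 1454 -> 169
# -> 363601, after which 1454 repeats, so 69 starts a chain of exactly five
# non-repeating terms.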
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'{solution()}')
| 52
|
import math
import random
def A ( __UpperCamelCase , __UpperCamelCase = False ) -> float:
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
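# Worked check (illustrative): sigmoid_function(0) == 0.5, and since the derivative
# form expects an already-sigmoided value, sigmoid_function(0.5, True) == 0.25.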
# Initial Value
SCREAMING_SNAKE_CASE__ = 0.02
def A ( __UpperCamelCase , __UpperCamelCase ) -> float:
A__ = float(2 * (random.randint(1 , 100 )) - 1 )
for _ in range(__UpperCamelCase ):
# Forward propagation
A__ = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
A__ = (expected / 100) - layer_a
# Error delta
A__ = layer_1_error * sigmoid_function(__UpperCamelCase , __UpperCamelCase )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE__ = int(input('''Expected value: '''))
SCREAMING_SNAKE_CASE__ = int(input('''Number of propagations: '''))
print(forward_propagation(expected, number_propagations))
| 52
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE__ = {'''configuration_glpn''': ['''GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GLPNConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ['''GLPNFeatureExtractor''']
SCREAMING_SNAKE_CASE__ = ['''GLPNImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''GLPN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GLPNForDepthEstimation''',
'''GLPNLayer''',
'''GLPNModel''',
'''GLPNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
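# Note (illustrative): _LazyModule defers the heavy torch/vision imports until an
# attribute such as `GLPNModel` is first accessed, so importing the package stays
# cheap even when optional backends are not installed.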
| 52
|
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self : int ):
"""simple docstring"""
A__ = FlaxMTaForConditionalGeneration.from_pretrained('google/mt5-small' )
A__ = AutoTokenizer.from_pretrained('google/mt5-small' )
A__ = tokenizer('Hello there' , return_tensors='np' ).input_ids
A__ = tokenizer('Hi I am' , return_tensors='np' ).input_ids
A__ = shift_tokens_right(_snake_case , model.config.pad_token_id , model.config.decoder_start_token_id )
A__ = model(_snake_case , decoder_input_ids=_snake_case ).logits
A__ = optax.softmax_cross_entropy(_snake_case , onehot(_snake_case , logits.shape[-1] ) ).mean()
A__ = -(labels.shape[-1] * loss.item())
A__ = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 52
| 1
|
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : int = 0
A__ : bool = False
A__ : float = 3.0
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Union[str, Any] ):
"""simple docstring"""
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} )
self.assertDictEqual(MockClass(a=2 , b=_snake_case ).to_kwargs() , {'a': 2, 'b': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} )
@require_cuda
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = GradScalerKwargs(init_scale=10_24 , growth_factor=2 )
AcceleratorState._reset_state()
A__ = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
A__ = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 20_00 )
self.assertEqual(scaler._enabled , _snake_case )
@require_multi_gpu
def _a ( self : Dict ):
"""simple docstring"""
A__ = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(_snake_case , env=os.environ.copy() )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = DistributedDataParallelKwargs(bucket_cap_mb=1_5, find_unused_parameters=True)
SCREAMING_SNAKE_CASE__ = Accelerator(kwargs_handlers=[ddp_scaler])
SCREAMING_SNAKE_CASE__ = torch.nn.Linear(1_0_0, 2_0_0)
SCREAMING_SNAKE_CASE__ = accelerator.prepare(model)
# Check the values changed in kwargs
SCREAMING_SNAKE_CASE__ = ''''''
SCREAMING_SNAKE_CASE__ = model.bucket_bytes_cap // (1_0_2_4 * 1_0_2_4)
if observed_bucket_cap_map != 1_5:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 52
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : List[str] = "roberta"
def __init__( self : List[str] , _snake_case : Union[str, Any]=5_02_65 , _snake_case : List[Any]=7_68 , _snake_case : List[str]=12 , _snake_case : List[str]=12 , _snake_case : Any=30_72 , _snake_case : Union[str, Any]="gelu" , _snake_case : int=0.1 , _snake_case : Union[str, Any]=0.1 , _snake_case : Tuple=5_12 , _snake_case : Union[str, Any]=2 , _snake_case : Any=0.02 , _snake_case : Any=1E-12 , _snake_case : List[Any]=1 , _snake_case : int=0 , _snake_case : Any=2 , _snake_case : Optional[Any]="absolute" , _snake_case : int=True , _snake_case : Any=None , **_snake_case : Any , ):
"""simple docstring"""
super().__init__(pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case )
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = initializer_range
A__ = layer_norm_eps
A__ = position_embedding_type
A__ = use_cache
A__ = classifier_dropout
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
@property
def _a ( self : Dict ):
"""simple docstring"""
if self.task == "multiple-choice":
A__ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
A__ = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 52
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
SCREAMING_SNAKE_CASE__ = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
SCREAMING_SNAKE_CASE__ = {
'''google/rembert''': 2_5_6,
}
SCREAMING_SNAKE_CASE__ = '''▁'''
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : Any = VOCAB_FILES_NAMES
A__ : str = PRETRAINED_VOCAB_FILES_MAP
A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : int = RemBertTokenizer
def __init__( self : Union[str, Any] , _snake_case : Any=None , _snake_case : Optional[Any]=None , _snake_case : Any=True , _snake_case : Optional[int]=True , _snake_case : Dict=False , _snake_case : Dict="[CLS]" , _snake_case : List[Any]="[SEP]" , _snake_case : Union[str, Any]="<unk>" , _snake_case : List[str]="[SEP]" , _snake_case : List[str]="<pad>" , _snake_case : str="[CLS]" , _snake_case : Any="[MASK]" , **_snake_case : Any , ):
"""simple docstring"""
A__ = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else mask_token
super().__init__(
_snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , remove_space=_snake_case , keep_accents=_snake_case , bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , **_snake_case , )
A__ = do_lower_case
A__ = remove_space
A__ = keep_accents
A__ = vocab_file
A__ = False if not self.vocab_file else True
def _a ( self : Any , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ):
"""simple docstring"""
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _a ( self : Tuple , _snake_case : List[int] , _snake_case : Optional[List[int]] = None , _snake_case : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_snake_case )) + [1] + ([0] * len(_snake_case )) + [1]
return [1] + ([0] * len(_snake_case )) + [1]
def _a ( self : Dict , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ):
"""simple docstring"""
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _a ( self : Any , _snake_case : str , _snake_case : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(_snake_case ):
logger.error('Vocabulary path ({}) should be a directory'.format(_snake_case ) )
return
A__ = os.path.join(
_snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ):
copyfile(self.vocab_file , _snake_case )
return (out_vocab_file,)
| 52
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : int = LongformerTokenizer
A__ : Optional[int] = True
A__ : Any = LongformerTokenizerFast
A__ : Dict = True
def _a ( self : int ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A__ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
A__ = dict(zip(_snake_case , range(len(_snake_case ) ) ) )
A__ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
A__ = {'unk_token': '<unk>'}
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_snake_case ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_snake_case ) )
def _a ( self : int , **_snake_case : Union[str, Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_snake_case )
def _a ( self : Optional[int] , **_snake_case : List[Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_snake_case )
def _a ( self : Any , _snake_case : Optional[Any] ):
"""simple docstring"""
A__ = 'lower newer'
A__ = 'lower newer'
return input_text, output_text
def _a ( self : Any ):
"""simple docstring"""
A__ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
A__ = 'lower newer'
A__ = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
A__ = tokenizer.tokenize(_snake_case ) # , add_prefix_space=True)
self.assertListEqual(_snake_case , _snake_case )
A__ = tokens + [tokenizer.unk_token]
A__ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , _snake_case )
def _a ( self : List[str] ):
"""simple docstring"""
A__ = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=_snake_case ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=_snake_case ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
A__ = tokenizer.encode('sequence builders' , add_special_tokens=_snake_case )
A__ = tokenizer.encode('multi-sequence build' , add_special_tokens=_snake_case )
A__ = tokenizer.encode(
'sequence builders' , add_special_tokens=_snake_case , add_prefix_space=_snake_case )
A__ = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=_snake_case , add_prefix_space=_snake_case )
A__ = tokenizer.build_inputs_with_special_tokens(_snake_case )
A__ = tokenizer.build_inputs_with_special_tokens(_snake_case , _snake_case )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _a ( self : List[str] ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = 'Encode this sequence.'
A__ = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
A__ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case , add_prefix_space=_snake_case )
A__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(_snake_case , _snake_case )
A__ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case , add_prefix_space=_snake_case )
A__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(_snake_case , _snake_case )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
A__ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
A__ = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(_snake_case , _snake_case )
# Testing spaces after special tokens
A__ = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case )} ) # mask token has a left space
A__ = tokenizer.convert_tokens_to_ids(_snake_case )
A__ = 'Encode <mask> sequence'
A__ = 'Encode <mask>sequence'
A__ = tokenizer.encode(_snake_case )
A__ = encoded.index(_snake_case )
A__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(_snake_case , _snake_case )
A__ = tokenizer.encode(_snake_case )
A__ = encoded.index(_snake_case )
A__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(_snake_case , _snake_case )
def _a ( self : Dict ):
"""simple docstring"""
pass
def _a ( self : Union[str, Any] ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A__ = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case )
A__ = self.tokenizer_class.from_pretrained(_snake_case , **_snake_case )
A__ = 'A, <mask> AllenNLP sentence.'
A__ = tokenizer_r.encode_plus(_snake_case , add_special_tokens=_snake_case , return_token_type_ids=_snake_case )
A__ = tokenizer_p.encode_plus(_snake_case , add_special_tokens=_snake_case , return_token_type_ids=_snake_case )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
A__ = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
A__ = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
                # Rust correctly handles the space before the mask while python doesn't
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
_snake_case , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
_snake_case , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def _a ( self : List[Any] ):
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
A__ = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
A__ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , _snake_case )
self.assertEqual(post_processor_state['add_prefix_space'] , _snake_case )
self.assertEqual(post_processor_state['trim_offsets'] , _snake_case )
def _a ( self : Optional[Any] ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A__ = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
A__ = F'''{text_of_1_token} {text_of_1_token}'''
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_snake_case ) + 1, len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_snake_case ) + 1, len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_snake_case ), len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_snake_case ), len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_snake_case ) + 1, 1 + len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_snake_case ), 1 + len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_snake_case ), 1 + len(_snake_case ) + 1 + len(_snake_case )) , )
| 52
| 1
|
import os
import sys
import unittest
SCREAMING_SNAKE_CASE__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
SCREAMING_SNAKE_CASE__ = os.path.join(git_repo_path, '''src''', '''transformers''')
SCREAMING_SNAKE_CASE__ = '''
{0} = None
'''
SCREAMING_SNAKE_CASE__ = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
'''
SCREAMING_SNAKE_CASE__ = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")' )
self.assertIsNone(_snake_case )
A__ = find_backend(' if not is_tokenizers_available():' )
self.assertEqual(_snake_case , 'tokenizers' )
A__ = find_backend(' if not is_tensorflow_text_available():' )
self.assertEqual(_snake_case , 'tensorflow_text' )
A__ = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):' )
self.assertEqual(_snake_case , 'sentencepiece_and_tokenizers' )
A__ = find_backend(
' if not (is_sentencepiece_available() and is_tensorflow_text_available()):' )
self.assertEqual(_snake_case , 'sentencepiece_and_tensorflow_text' )
A__ = find_backend(
' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):' )
self.assertEqual(_snake_case , 'sentencepiece_and_tokenizers_and_vision' )
def _a ( self : str ):
"""simple docstring"""
A__ = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('torch' , _snake_case )
self.assertIn('tensorflow_text' , _snake_case )
self.assertIn('sentencepiece_and_tokenizers' , _snake_case )
# Likewise, we can't assert on the exact content of a key
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertModel' , objects['tf'] )
self.assertIn('FlaxBertModel' , objects['flax'] )
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertTokenizer' , objects['tensorflow_text'] )
self.assertIn('convert_slow_tokenizer' , objects['sentencepiece_and_tokenizers'] )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = create_dummy_object('CONSTANT' , '\'torch\'' )
self.assertEqual(_snake_case , '\nCONSTANT = None\n' )
A__ = create_dummy_object('function' , '\'torch\'' )
self.assertEqual(
_snake_case , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )
A__ = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n'
A__ = create_dummy_object('FakeClass' , '\'torch\'' )
self.assertEqual(_snake_case , _snake_case )
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n'
A__ = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
self.assertEqual(dummy_files['torch'] , _snake_case )
| 52
|
import pytest
import datasets
# Import fixture modules as plugins
SCREAMING_SNAKE_CASE__ = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def A ( __UpperCamelCase , __UpperCamelCase ) -> Optional[int]:
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ['integration', 'unit'] ):
continue
item.add_marker(pytest.mark.unit )
def A ( __UpperCamelCase ) -> str:
config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' )
@pytest.fixture(autouse=__UpperCamelCase )
def A ( __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]:
# test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work?
A__ = tmp_path_factory.getbasetemp() / 'cache'
A__ = test_hf_cache_home / 'datasets'
A__ = test_hf_cache_home / 'metrics'
A__ = test_hf_cache_home / 'modules'
monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(__UpperCamelCase ) )
monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(__UpperCamelCase ) )
monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(__UpperCamelCase ) )
A__ = test_hf_datasets_cache / 'downloads'
monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(__UpperCamelCase ) )
A__ = test_hf_datasets_cache / 'downloads' / 'extracted'
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(__UpperCamelCase ) )
@pytest.fixture(autouse=__UpperCamelCase , scope='session' )
def A ( ) -> Union[str, Any]:
datasets.disable_progress_bar()
@pytest.fixture(autouse=__UpperCamelCase )
def A ( __UpperCamelCase ) -> int:
# don't take tests into account when counting downloads
monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , __UpperCamelCase )
@pytest.fixture
def A ( __UpperCamelCase ) -> Any:
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
# To be removed once SQLAlchemy 2.0 supported
monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , __UpperCamelCase )
| 52
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 52
|
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def A ( __UpperCamelCase , __UpperCamelCase ) -> Tuple:
A__ = args.log_outputs
A__ = '_'.join(args.dataset.split('/' ) + [args.config, args.split] )
# load metric
A__ = load_metric('wer' )
A__ = load_metric('cer' )
# compute metrics
A__ = wer.compute(references=result['target'] , predictions=result['prediction'] )
A__ = cer.compute(references=result['target'] , predictions=result['prediction'] )
# print & log results
A__ = f'''WER: {wer_result}\nCER: {cer_result}'''
print(__UpperCamelCase )
with open(f'''{dataset_id}_eval_results.txt''' , 'w' ) as f:
f.write(__UpperCamelCase )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
A__ = f'''log_{dataset_id}_predictions.txt'''
A__ = f'''log_{dataset_id}_targets.txt'''
with open(__UpperCamelCase , 'w' ) as p, open(__UpperCamelCase , 'w' ) as t:
# mapping function to write output
def write_to_file(__UpperCamelCase , __UpperCamelCase ):
p.write(f'''{i}''' + '\n' )
p.write(batch['prediction'] + '\n' )
t.write(f'''{i}''' + '\n' )
t.write(batch['target'] + '\n' )
result.map(__UpperCamelCase , with_indices=__UpperCamelCase )
def A ( __UpperCamelCase ) -> str:
A__ = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
A__ = re.sub(__UpperCamelCase , '' , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
A__ = ['\n\n', '\n', ' ', ' ']
for t in token_sequences_to_ignore:
A__ = ' '.join(text.split(__UpperCamelCase ) )
return text
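# Illustrative example: normalize_text('Hello, World!') -> 'hello world'; the regex
# strips the ignored punctuation after lowercasing, then the loop collapses newline
# and double-space runs into single spaces.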
def A ( __UpperCamelCase ) -> Union[str, Any]:
# load dataset
A__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=__UpperCamelCase )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
A__ = AutoFeatureExtractor.from_pretrained(args.model_id )
A__ = feature_extractor.sampling_rate
# resample audio
A__ = dataset.cast_column('audio' , Audio(sampling_rate=__UpperCamelCase ) )
# load eval pipeline
if args.device is None:
A__ = 0 if torch.cuda.is_available() else -1
A__ = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(__UpperCamelCase ):
A__ = asr(
batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
A__ = prediction['text']
A__ = normalize_text(batch['sentence'] )
return batch
# run inference on all examples
A__ = dataset.map(__UpperCamelCase , remove_columns=dataset.column_names )
# compute and log results
# do not change the function call below
log_results(__UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
)
parser.add_argument(
'''--dataset''',
type=str,
required=True,
help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
)
parser.add_argument(
'''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
)
parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
parser.add_argument(
'''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to None (no chunking).'''
)
parser.add_argument(
'''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks in seconds. Defaults to None.'''
)
parser.add_argument(
'''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
)
parser.add_argument(
'''--device''',
type=int,
default=None,
help='''The device to run the pipeline on. -1 for CPU, 0 for the first GPU and so on. Defaults to GPU 0 if available, else CPU.''',
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
main(args)
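# Example invocation (script name and model/dataset IDs are illustrative):
# python eval.py --model_id facebook/wav2vec2-base-960h --dataset mozilla-foundation/common_voice_7_0 --config en --split test --log_outputs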
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : str , _snake_case : int , _snake_case : List[Any] ):
"""simple docstring"""
A__ = params
A__ = np.array(_snake_case )
A__ = np.array([len(_snake_case ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self : List[Any] , _snake_case : Tuple ):
"""simple docstring"""
return (self.token_ids[index], self.lengths[index])
def __len__( self : Optional[Any] ):
"""simple docstring"""
return len(self.lengths )
def _a ( self : str ):
"""simple docstring"""
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def _a ( self : List[str] ):
"""simple docstring"""
A__ = self.params.max_model_input_size
A__ = self.lengths > max_len
logger.info(F'''Splitting {sum(_snake_case )} too long sequences.''' )
def divide_chunks(_snake_case : int , _snake_case : Optional[int] ):
return [l[i : i + n] for i in range(0 , len(_snake_case ) , _snake_case )]
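# e.g. divide_chunks([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]]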
A__ = []
A__ = []
if self.params.mlm:
A__ , A__ = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
else:
A__ , A__ = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
A__ = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
A__ = np.insert(_snake_case , 0 , _snake_case )
if sub_s[-1] != sep_id:
A__ = np.insert(_snake_case , len(_snake_case ) , _snake_case )
assert len(_snake_case ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(_snake_case )
new_tok_ids.extend(_snake_case )
new_lengths.extend([len(_snake_case ) for l in sub_seqs] )
A__ = np.array(_snake_case )
A__ = np.array(_snake_case )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = len(self )
A__ = self.lengths > 11
A__ = self.token_ids[indices]
A__ = self.lengths[indices]
A__ = len(self )
logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' )
def _a ( self : Dict ):
"""simple docstring"""
if "unk_token" not in self.params.special_tok_ids:
return
else:
A__ = self.params.special_tok_ids['unk_token']
A__ = len(self )
A__ = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
A__ = (unk_occs / self.lengths) < 0.5
A__ = self.token_ids[indices]
A__ = self.lengths[indices]
A__ = len(self )
logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' )
def _a ( self : Tuple ):
"""simple docstring"""
if not self.params.is_master:
return
logger.info(F'''{len(self )} sequences''' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def _a ( self : int , _snake_case : Optional[Any] ):
"""simple docstring"""
A__ = [t[0] for t in batch]
A__ = [t[1] for t in batch]
assert len(_snake_case ) == len(_snake_case )
# Max for paddings
A__ = max(_snake_case )
# Pad token ids
if self.params.mlm:
A__ = self.params.special_tok_ids['pad_token']
else:
A__ = self.params.special_tok_ids['unk_token']
A__ = [list(t.astype(_snake_case ) ) + [pad_idx] * (max_seq_len_ - len(_snake_case )) for t in token_ids]
assert len(tk_ ) == len(_snake_case )
assert all(len(_snake_case ) == max_seq_len_ for t in tk_ )
A__ = torch.tensor(tk_ ) # (bs, max_seq_len_)
A__ = torch.tensor(_snake_case ) # (bs)
return tk_t, lg_t
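# Minimal usage sketch (names are illustrative; upstream calls this collate method batch_sequences):
# the pad-and-collate method above is typically passed as `collate_fn` to a DataLoader, e.g.
# loader = DataLoader(lm_seqs_dataset, batch_size=32, collate_fn=lm_seqs_dataset.batch_sequences)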
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def A ( __UpperCamelCase ) -> YolosConfig:
A__ = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
A__ = 192
A__ = 768
A__ = 12
A__ = 3
A__ = [800, 1_333]
A__ = False
elif yolos_name == "yolos_s_dWr":
A__ = 330
A__ = 14
A__ = 6
A__ = 1_320
elif "yolos_s" in yolos_name:
A__ = 384
A__ = 1_536
A__ = 12
A__ = 6
elif "yolos_b" in yolos_name:
A__ = [800, 1_344]
A__ = 91
A__ = 'huggingface/label-files'
A__ = 'coco-detection-id2label.json'
A__ = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='dataset' ) , 'r' ) )
A__ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
return config
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False ) -> str:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
A__ = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
A__ = in_proj_weight[: config.hidden_size, :]
A__ = in_proj_bias[: config.hidden_size]
A__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ = in_proj_weight[-config.hidden_size :, :]
A__ = in_proj_bias[-config.hidden_size :]
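# The fused qkv matrix has shape (3 * hidden_size, hidden_size); the slices above split it
# into the separate query, key and value projections (in that order) expected by Transformers.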
def A ( __UpperCamelCase ) -> str:
if "backbone" in name:
A__ = name.replace('backbone' , 'vit' )
if "cls_token" in name:
A__ = name.replace('cls_token' , 'embeddings.cls_token' )
if "det_token" in name:
A__ = name.replace('det_token' , 'embeddings.detection_tokens' )
if "mid_pos_embed" in name:
A__ = name.replace('mid_pos_embed' , 'encoder.mid_position_embeddings' )
if "pos_embed" in name:
A__ = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
A__ = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "blocks" in name:
A__ = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
A__ = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
A__ = name.replace('attn' , 'attention.self' )
if "norm1" in name:
A__ = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
A__ = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
A__ = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
A__ = name.replace('mlp.fc2' , 'output.dense' )
if "class_embed" in name:
A__ = name.replace('class_embed' , 'class_labels_classifier' )
if "bbox_embed" in name:
A__ = name.replace('bbox_embed' , 'bbox_predictor' )
if "vit.norm" in name:
A__ = name.replace('vit.norm' , 'vit.layernorm' )
return name
def A ( __UpperCamelCase , __UpperCamelCase ) -> dict:
for key in orig_state_dict.copy().keys():
A__ = orig_state_dict.pop(__UpperCamelCase )
if "qkv" in key:
A__ = key.split('.' )
A__ = int(key_split[2] )
A__ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
A__ = val[:dim, :]
A__ = val[
dim : dim * 2, :
]
A__ = val[-dim:, :]
else:
A__ = val[:dim]
A__ = val[dim : dim * 2]
A__ = val[-dim:]
else:
A__ = val
return orig_state_dict
def A ( ) -> torch.Tensor:
A__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A__ = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False ) -> List[str]:
A__ = get_yolos_config(__UpperCamelCase )
# load original state_dict
A__ = torch.load(__UpperCamelCase , map_location='cpu' )['model']
# load 🤗 model
A__ = YolosForObjectDetection(__UpperCamelCase )
model.eval()
A__ = convert_state_dict(__UpperCamelCase , __UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
# Check outputs on an image, prepared by YolosImageProcessor
A__ = 800 if yolos_name != 'yolos_ti' else 512
A__ = YolosImageProcessor(format='coco_detection' , size=__UpperCamelCase )
A__ = image_processor(images=prepare_img() , return_tensors='pt' )
A__ = model(**__UpperCamelCase )
A__ , A__ = outputs.logits, outputs.pred_boxes
A__ , A__ = None, None
if yolos_name == "yolos_ti":
A__ = torch.tensor(
[[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
A__ = torch.tensor(
[[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
elif yolos_name == "yolos_s_200_pre":
A__ = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
A__ = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
elif yolos_name == "yolos_s_300_pre":
A__ = torch.tensor(
[[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
A__ = torch.tensor(
[[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
elif yolos_name == "yolos_s_dWr":
A__ = torch.tensor(
[[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
A__ = torch.tensor(
[[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
elif yolos_name == "yolos_base":
A__ = torch.tensor(
[[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
A__ = torch.tensor(
[[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
else:
raise ValueError(f'''Unknown yolos_name: {yolos_name}''' )
assert torch.allclose(logits[0, :3, :3] , __UpperCamelCase , atol=1E-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , __UpperCamelCase , atol=1E-4 )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(f'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCamelCase )
if push_to_hub:
A__ = {
'yolos_ti': 'yolos-tiny',
'yolos_s_200_pre': 'yolos-small',
'yolos_s_300_pre': 'yolos-small-300',
'yolos_s_dWr': 'yolos-small-dwr',
'yolos_base': 'yolos-base',
}
print('Pushing to the hub...' )
A__ = model_mapping[yolos_name]
image_processor.push_to_hub(__UpperCamelCase , organization='hustvl' )
model.push_to_hub(__UpperCamelCase , organization='hustvl' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
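# Example invocation (paths are illustrative):
# python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre --checkpoint_path ./yolos_s_200_pre.pth --pytorch_dump_folder_path ./yolos-small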
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
SCREAMING_SNAKE_CASE__ = '''sshleifer/bart-tiny-random'''
SCREAMING_SNAKE_CASE__ = '''patrickvonplaten/t5-tiny-random'''
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : Optional[int] ):
"""simple docstring"""
return AutoConfig.from_pretrained(_snake_case )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=_snake_case )
def _a ( self : int ):
"""simple docstring"""
A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=_snake_case )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def _a ( self : str ):
"""simple docstring"""
A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def _a ( self : str ):
"""simple docstring"""
with self.assertRaises(_snake_case ):
create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=_snake_case , d=_snake_case )
from typing import TYPE_CHECKING
from ..utils import _LazyModule
SCREAMING_SNAKE_CASE__ = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
SCREAMING_SNAKE_CASE__ = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
SCREAMING_SNAKE_CASE__ = {
'''google/rembert''': 2_5_6,
}
SCREAMING_SNAKE_CASE__ = '''▁'''
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : Any = VOCAB_FILES_NAMES
A__ : str = PRETRAINED_VOCAB_FILES_MAP
A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : int = RemBertTokenizer
def __init__( self : Union[str, Any] , _snake_case : Any=None , _snake_case : Optional[Any]=None , _snake_case : Any=True , _snake_case : Optional[int]=True , _snake_case : Dict=False , _snake_case : Dict="[CLS]" , _snake_case : List[Any]="[SEP]" , _snake_case : Union[str, Any]="<unk>" , _snake_case : List[str]="[SEP]" , _snake_case : List[str]="<pad>" , _snake_case : str="[CLS]" , _snake_case : Any="[MASK]" , **_snake_case : Any , ):
"""simple docstring"""
A__ = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else mask_token
super().__init__(
_snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , remove_space=_snake_case , keep_accents=_snake_case , bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , **_snake_case , )
A__ = do_lower_case
A__ = remove_space
A__ = keep_accents
A__ = vocab_file
A__ = bool(self.vocab_file )
def _a ( self : Any , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ):
"""simple docstring"""
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
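# Resulting format: `[CLS] A [SEP]` for a single sequence and `[CLS] A [SEP] B [SEP]` for a pair.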
def _a ( self : Tuple , _snake_case : List[int] , _snake_case : Optional[List[int]] = None , _snake_case : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_snake_case )) + [1] + ([0] * len(_snake_case )) + [1]
return [1] + ([0] * len(_snake_case )) + [1]
def _a ( self : Dict , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ):
"""simple docstring"""
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
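# Token type ids are 0 for the first sequence (including [CLS] and its [SEP]) and 1 for the second.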
def _a ( self : Any , _snake_case : str , _snake_case : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(_snake_case ):
logger.error('Vocabulary path ({}) should be a directory'.format(_snake_case ) )
return
A__ = os.path.join(
_snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ):
copyfile(self.vocab_file , _snake_case )
return (out_vocab_file,)
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
SCREAMING_SNAKE_CASE__ = 5_0_0_0_3
SCREAMING_SNAKE_CASE__ = 5_0_0_0_2
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Union[str, Any] = PLBartTokenizer
A__ : Optional[Any] = None
A__ : Tuple = False
def _a ( self : Tuple ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
A__ = PLBartTokenizer(_snake_case , language_codes='base' , keep_accents=_snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def _a ( self : int ):
"""simple docstring"""
A__ = PLBartTokenizer(_snake_case , language_codes='base' , keep_accents=_snake_case )
A__ = tokenizer.tokenize('This is a test' )
self.assertListEqual(_snake_case , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_snake_case ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
A__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_snake_case , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
A__ = tokenizer.convert_tokens_to_ids(_snake_case )
self.assertListEqual(
_snake_case , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
A__ = tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(
_snake_case , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
A__ = tokenizer.vocab_size
A__ = [tokenizer.convert_ids_to_tokens(_snake_case ) for x in range(end - 4 , _snake_case )]
self.assertListEqual(_snake_case , ['__java__', '__python__', '__en_XX__', '<mask>'] )
A__ = 'java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'
A__ = tokenizer(_snake_case ).input_ids
self.assertEqual(
tokenizer.decode(_snake_case , skip_special_tokens=_snake_case , clean_up_tokenization_spaces=_snake_case ) , _snake_case , )
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = PLBartTokenizer(_snake_case , language_codes='multi' , keep_accents=_snake_case )
A__ = tokenizer.tokenize('This is a test' )
self.assertListEqual(_snake_case , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_snake_case ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
A__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_snake_case , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
A__ = tokenizer.convert_tokens_to_ids(_snake_case )
self.assertListEqual(
_snake_case , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
A__ = tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(
_snake_case , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
A__ = tokenizer.vocab_size
A__ = [tokenizer.convert_ids_to_tokens(_snake_case ) for x in range(end - 7 , _snake_case )]
self.assertListEqual(
_snake_case , ['__java__', '__python__', '__en_XX__', '__javascript__', '__php__', '__ruby__', '__go__'] )
A__ = 'java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'
A__ = tokenizer(_snake_case ).input_ids
self.assertEqual(
tokenizer.decode(_snake_case , skip_special_tokens=_snake_case , clean_up_tokenization_spaces=_snake_case ) , _snake_case , )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
A__ : List[str] = "uclanlp/plbart-python-en_XX"
A__ : Optional[Any] = [
"def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
"def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
]
A__ : Dict = [
"Returns the maximum value of a b c.",
"Sums the values of a b c.",
]
A__ : Tuple = [
1_34,
54_52,
3_34_60,
3_34_41,
3_34_63,
3_34_65,
3_34_63,
3_34_49,
9_88,
20,
3_34_56,
19,
3_34_56,
7_71,
39,
42_58,
8_89,
33_18,
3_34_41,
3_34_63,
3_34_65,
3_34_63,
3_34_49,
24_71,
2,
PYTHON_CODE,
]
@classmethod
def _a ( cls : Dict ):
"""simple docstring"""
A__ = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes='base' , src_lang='python' , tgt_lang='en_XX' )
A__ = 1
return cls
def _a ( self : Optional[Any] ):
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__java__'] , 5_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__python__'] , 5_00_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__en_XX__'] , 5_00_03 )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _snake_case )
def _a ( self : str ):
"""simple docstring"""
self.assertIn(_snake_case , self.tokenizer.all_special_ids )
A__ = [EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2]
A__ = self.tokenizer.decode(_snake_case , skip_special_tokens=_snake_case )
A__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_snake_case )
self.assertEqual(_snake_case , _snake_case )
self.assertNotIn(self.tokenizer.eos_token , _snake_case )
def _a ( self : Tuple ):
"""simple docstring"""
A__ = ['def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])' * 20]
self.assertIsInstance(src_text[0] , _snake_case )
A__ = 10
A__ = self.tokenizer(_snake_case , max_length=_snake_case , truncation=_snake_case ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , _snake_case )
self.assertEqual(len(_snake_case ) , _snake_case )
def _a ( self : Tuple ):
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', '__java__'] ) , [5_00_04, 5_00_01] )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = tempfile.mkdtemp()
A__ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_snake_case )
A__ = PLBartTokenizer.from_pretrained(_snake_case )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _snake_case )
@require_torch
def _a ( self : Dict ):
"""simple docstring"""
A__ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_snake_case , return_tensors='pt' )
A__ = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , _snake_case )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def _a ( self : str ):
"""simple docstring"""
A__ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_snake_case , truncation=_snake_case , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
A__ = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(_snake_case , _snake_case )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
A__ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _snake_case )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def _a ( self : List[str] ):
"""simple docstring"""
A__ = self.tokenizer(self.src_text , padding=_snake_case , truncation=_snake_case , max_length=3 , return_tensors='pt' )
A__ = self.tokenizer(
text_target=self.tgt_text , padding=_snake_case , truncation=_snake_case , max_length=10 , return_tensors='pt' )
A__ = targets['input_ids']
A__ = shift_tokens_right(_snake_case , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _a ( self : Dict ):
"""simple docstring"""
A__ = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='java' )
self.assertEqual(
nested_simplify(_snake_case ) , {
# A, test, EOS, en_XX
'input_ids': [[1_50, 2_42, 2, 5_00_03]],
'attention_mask': [[1, 1, 1, 1]],
# java
'forced_bos_token_id': 5_00_01,
} , )
from pathlib import Path
import numpy as np
from PIL import Image
def A ( __UpperCamelCase ) -> np.ndarray:
A__ , A__ , A__ = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.2989 * r + 0.5870 * g + 0.1140 * b
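# The 0.2989 / 0.5870 / 0.1140 weights are the standard ITU-R BT.601 luma coefficients.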
def A ( __UpperCamelCase ) -> np.ndarray:
return (gray > 127) & (gray <= 255)
def A ( __UpperCamelCase , __UpperCamelCase ) -> np.ndarray:
A__ = np.zeros_like(__UpperCamelCase )
A__ = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
# Copy image to padded image
A__ = image
# Iterate over image & apply kernel
for x in range(image.shape[1] ):
for y in range(image.shape[0] ):
A__ = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
A__ = int(summation > 0 )
return output
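# Dilation sets an output pixel to 1 wherever the kernel overlaps at least one foreground
# pixel (summation > 0), i.e. it grows the white regions of the binary image.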
if __name__ == "__main__":
# read original image
SCREAMING_SNAKE_CASE__ = Path(__file__).resolve().parent / '''image_data''' / '''lena.jpg'''
SCREAMING_SNAKE_CASE__ = np.array(Image.open(lena_path))
# kernel to be applied
SCREAMING_SNAKE_CASE__ = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
SCREAMING_SNAKE_CASE__ = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
SCREAMING_SNAKE_CASE__ = Image.fromarray(output).convert('''RGB''')
pil_img.save('''result_dilation.png''')
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : Union[str, Any] = ["image_processor", "tokenizer"]
A__ : Optional[Any] = "BridgeTowerImageProcessor"
A__ : List[Any] = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self : List[Any] , _snake_case : Optional[Any] , _snake_case : Optional[int] ):
"""simple docstring"""
super().__init__(_snake_case , _snake_case )
def __call__( self : List[Any] , _snake_case : int , _snake_case : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _snake_case : bool = True , _snake_case : Union[bool, str, PaddingStrategy] = False , _snake_case : Union[bool, str, TruncationStrategy] = None , _snake_case : Optional[int] = None , _snake_case : int = 0 , _snake_case : Optional[int] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[bool] = None , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = True , _snake_case : Optional[Union[str, TensorType]] = None , **_snake_case : Optional[int] , ):
"""simple docstring"""
A__ = self.tokenizer(
text=_snake_case , add_special_tokens=_snake_case , padding=_snake_case , truncation=_snake_case , max_length=_snake_case , stride=_snake_case , pad_to_multiple_of=_snake_case , return_token_type_ids=_snake_case , return_attention_mask=_snake_case , return_overflowing_tokens=_snake_case , return_special_tokens_mask=_snake_case , return_offsets_mapping=_snake_case , return_length=_snake_case , verbose=_snake_case , return_tensors=_snake_case , **_snake_case , )
# add pixel_values + pixel_mask
A__ = self.image_processor(
_snake_case , return_tensors=_snake_case , do_normalize=_snake_case , do_center_crop=_snake_case , **_snake_case )
encoding.update(_snake_case )
return encoding
def _a ( self : Any , *_snake_case : Tuple , **_snake_case : List[Any] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*_snake_case , **_snake_case )
def _a ( self : Dict , *_snake_case : Dict , **_snake_case : List[str] ):
"""simple docstring"""
return self.tokenizer.decode(*_snake_case , **_snake_case )
@property
def _a ( self : Tuple ):
"""simple docstring"""
A__ = self.tokenizer.model_input_names
A__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
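# Minimal usage sketch (checkpoint name is illustrative):
# processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
# inputs = processor(images=image, text="a photo of a cat", return_tensors="pt")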
from itertools import permutations
def A ( __UpperCamelCase ) -> bool:
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
A__ = [7, 11, 13, 17]
for i, test in enumerate(__UpperCamelCase ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
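# Project Euler 43: a 0-9 pandigital number d1..d10 qualifies when d2d3d4, d3d4d5, d4d5d6,
# d5d6d7, d6d7d8, d7d8d9 and d8d9d10 are divisible by 2, 3, 5, 7, 11, 13 and 17 respectively;
# the first three checks above are single-digit shortcuts for divisibility by 2, 3 and 5.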
def A ( __UpperCamelCase = 10 ) -> int:
return sum(
int(''.join(map(__UpperCamelCase , __UpperCamelCase ) ) )
for num in permutations(range(__UpperCamelCase ) )
if is_substring_divisible(__UpperCamelCase ) )
if __name__ == "__main__":
print(f'{solution() = }')
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ['''XLMRobertaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ['''XLMRobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
SCREAMING_SNAKE_CASE__ = '''\
'''
SCREAMING_SNAKE_CASE__ = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
SCREAMING_SNAKE_CASE__ = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def _a ( self : List[str] ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'input_texts': datasets.Value('string' ),
} ) , reference_urls=['https://huggingface.co/docs/transformers/perplexity'] , )
def _a ( self : Dict , _snake_case : Optional[Any] , _snake_case : Optional[Any] , _snake_case : int = 16 , _snake_case : bool = True , _snake_case : Dict=None ):
"""simple docstring"""
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
A__ = 'cuda'
else:
A__ = 'cuda' if torch.cuda.is_available() else 'cpu'
A__ = AutoModelForCausalLM.from_pretrained(_snake_case )
A__ = model.to(_snake_case )
A__ = AutoTokenizer.from_pretrained(_snake_case )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
A__ = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(_snake_case ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
A__ = model.config.max_length - 1
else:
A__ = model.config.max_length
A__ = tokenizer(
_snake_case , add_special_tokens=_snake_case , padding=_snake_case , truncation=_snake_case , max_length=_snake_case , return_tensors='pt' , return_attention_mask=_snake_case , ).to(_snake_case )
A__ = encodings['input_ids']
A__ = encodings['attention_mask']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
A__ = []
A__ = CrossEntropyLoss(reduction='none' )
for start_index in logging.tqdm(range(0 , len(_snake_case ) , _snake_case ) ):
A__ = min(start_index + batch_size , len(_snake_case ) )
A__ = encoded_texts[start_index:end_index]
A__ = attn_masks[start_index:end_index]
if add_start_token:
A__ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_snake_case )
A__ = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
A__ = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(_snake_case ), attn_mask] , dim=1 )
A__ = encoded_batch
with torch.no_grad():
A__ = model(_snake_case , attention_mask=_snake_case ).logits
A__ = out_logits[..., :-1, :].contiguous()
A__ = labels[..., 1:].contiguous()
A__ = attn_mask[..., 1:].contiguous()
A__ = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , _snake_case ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(_snake_case )}
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def A ( __UpperCamelCase ) -> Tuple:
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[Any]:
return max(metric_fn(__UpperCamelCase , __UpperCamelCase ) for gt in ground_truths )
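# Returns the best score over all acceptable references, e.g. (illustrative)
# metric_max_over_ground_truths(exact_match_score, "Paris", ["Paris", "paris"])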
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[str]:
A__ = [line.strip() for line in open(__UpperCamelCase , 'r' ).readlines()]
A__ = []
if args.gold_data_mode == "qa":
A__ = pd.read_csv(__UpperCamelCase , sep='\t' , header=__UpperCamelCase )
for answer_list in data[1]:
A__ = ast.literal_eval(__UpperCamelCase )
answers.append(__UpperCamelCase )
else:
A__ = [line.strip() for line in open(__UpperCamelCase , 'r' ).readlines()]
A__ = [[reference] for reference in references]
A__ = A__ = A__ = 0
for prediction, ground_truths in zip(__UpperCamelCase , __UpperCamelCase ):
total += 1
em += metric_max_over_ground_truths(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
fa += metric_max_over_ground_truths(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
A__ = 100.0 * em / total
A__ = 100.0 * fa / total
logger.info(f'''F1: {fa:.2f}''' )
logger.info(f'''EM: {em:.2f}''' )
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[int]:
A__ = args.k
A__ = [line.strip() for line in open(__UpperCamelCase , 'r' ).readlines()]
A__ = [line.strip() for line in open(__UpperCamelCase , 'r' ).readlines()]
A__ = A__ = 0
for hypo, reference in zip(__UpperCamelCase , __UpperCamelCase ):
A__ = set(hypo.split('\t' )[:k] )
A__ = set(reference.split('\t' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
A__ = 100.0 * em / total
logger.info(f'''Precision@{k}: {em: .2f}''' )
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]:
def strip_title(__UpperCamelCase ):
if title.startswith('"' ):
A__ = title[1:]
if title.endswith('"' ):
A__ = title[:-1]
return title
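# e.g. strip_title('"Eiffel Tower"') -> 'Eiffel Tower'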
A__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
__UpperCamelCase , return_tensors='pt' , padding=__UpperCamelCase , truncation=__UpperCamelCase , )['input_ids'].to(args.device )
A__ = rag_model.rag.question_encoder(__UpperCamelCase )
A__ = question_enc_outputs[0]
A__ = rag_model.retriever(
__UpperCamelCase , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='pt' , )
A__ = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
A__ = []
for docs in all_docs:
A__ = [strip_title(__UpperCamelCase ) for title in docs['title']]
provenance_strings.append('\t'.join(__UpperCamelCase ) )
return provenance_strings
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]:
with torch.no_grad():
A__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
__UpperCamelCase , return_tensors='pt' , padding=__UpperCamelCase , truncation=__UpperCamelCase )
A__ = inputs_dict.input_ids.to(args.device )
A__ = inputs_dict.attention_mask.to(args.device )
A__ = rag_model.generate( # rag_model overwrites generate
__UpperCamelCase , attention_mask=__UpperCamelCase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__UpperCamelCase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
A__ = rag_model.retriever.generator_tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
if args.print_predictions:
for q, a in zip(__UpperCamelCase , __UpperCamelCase ):
logger.info('Q: {} - A: {}'.format(__UpperCamelCase , __UpperCamelCase ) )
return answers
def A ( ) -> Any:
A__ = argparse.ArgumentParser()
parser.add_argument(
'--model_type' , choices=['rag_sequence', 'rag_token', 'bart'] , type=__UpperCamelCase , help=(
'RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'
' model_name_or_path'
) , )
parser.add_argument(
'--index_name' , default=__UpperCamelCase , choices=['exact', 'compressed', 'legacy'] , type=__UpperCamelCase , help='RAG model retriever type' , )
parser.add_argument(
'--index_path' , default=__UpperCamelCase , type=__UpperCamelCase , help='Path to the retrieval index' , )
parser.add_argument('--n_docs' , default=5 , type=__UpperCamelCase , help='Number of retrieved docs' )
parser.add_argument(
'--model_name_or_path' , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help='Path to pretrained checkpoints or model identifier from huggingface.co/models' , )
parser.add_argument(
'--eval_mode' , choices=['e2e', 'retrieval'] , default='e2e' , type=__UpperCamelCase , help=(
'Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'
' precision@k.'
) , )
parser.add_argument('--k' , default=1 , type=__UpperCamelCase , help='k for the precision@k calculation' )
parser.add_argument(
'--evaluation_set' , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help='Path to a file containing evaluation samples' , )
parser.add_argument(
'--gold_data_path' , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help='Path to a tab-separated file with gold samples' , )
parser.add_argument(
'--gold_data_mode' , default='qa' , type=__UpperCamelCase , choices=['qa', 'ans'] , help=(
'Format of the gold data file. '
'qa - a single line in the following format: question [tab] answer_list. '
'ans - a single line of the gold file contains the expected answer string.'
) , )
parser.add_argument(
'--predictions_path' , type=__UpperCamelCase , default='predictions.txt' , help='Name of the predictions file, to be stored in the checkpoints directory' , )
parser.add_argument(
'--eval_all_checkpoints' , action='store_true' , help='Evaluate all checkpoints starting with the same prefix as model_name and ending with a step number' , )
parser.add_argument(
'--eval_batch_size' , default=8 , type=__UpperCamelCase , help='Batch size per GPU/CPU for evaluation.' , )
parser.add_argument(
'--recalculate' , help='Recalculate predictions even if the prediction file exists' , action='store_true' , )
parser.add_argument(
'--num_beams' , default=4 , type=__UpperCamelCase , help='Number of beams to be used when generating answers' , )
parser.add_argument('--min_length' , default=1 , type=__UpperCamelCase , help='Min length of the generated answers' )
parser.add_argument('--max_length' , default=50 , type=__UpperCamelCase , help='Max length of the generated answers' )
parser.add_argument(
'--print_predictions' , action='store_true' , help='If True, prints predictions while evaluating.' , )
parser.add_argument(
'--print_docs' , action='store_true' , help='If True, prints docs retrieved while generating.' , )
A__ = parser.parse_args()
A__ = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
return args
def A ( __UpperCamelCase ) -> int:
A__ = {}
if args.model_type is None:
A__ = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('rag' ):
A__ = RagTokenForGeneration if args.model_type == 'rag_token' else RagSequenceForGeneration
A__ = args.n_docs
if args.index_name is not None:
A__ = args.index_name
if args.index_path is not None:
A__ = args.index_path
else:
A__ = BartForConditionalGeneration
A__ = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('Evaluate the following checkpoints: %s' , __UpperCamelCase )
A__ = get_scores if args.eval_mode == 'e2e' else get_precision_at_k
A__ = evaluate_batch_eae if args.eval_mode == 'e2e' else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path ) and (not args.recalculate):
            logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path ) )
            score_fn(args , args.predictions_path , args.gold_data_path )
            continue
        logger.info('***** Running evaluation for {} *****'.format(checkpoint ) )
        logger.info('  Batch size = %d' , args.eval_batch_size )
        logger.info('  Predictions will be stored under {}'.format(args.predictions_path ) )
        if args.model_type.startswith('rag' ):
            retriever = RagRetriever.from_pretrained(checkpoint , **model_kwargs )
            model = model_class.from_pretrained(checkpoint , retriever=retriever , **model_kwargs )
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint , **model_kwargs )
        model.to(args.device )
        with open(args.evaluation_set , 'r' ) as eval_file, open(args.predictions_path , 'w' ) as preds_file:
            questions = []
            for line in tqdm(eval_file ):
                questions.append(line.strip() )
                if len(questions ) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args , model , questions )
                    preds_file.write('\n'.join(answers ) + '\n' )
                    preds_file.flush()
                    questions = []
            if len(questions ) > 0:
                answers = evaluate_batch_fn(args , model , questions )
                preds_file.write('\n'.join(answers ) )
                preds_file.flush()
            score_fn(args , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
    args = get_args()
main(args)
| 52
| 1
|
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str , _snake_case : Optional[int] , _snake_case : Optional[int]=13 , _snake_case : Optional[Any]=7 , _snake_case : List[str]=True , _snake_case : int=True , _snake_case : int=True , _snake_case : Union[str, Any]=True , _snake_case : Union[str, Any]=99 , _snake_case : List[Any]=32 , _snake_case : Optional[Any]=5 , _snake_case : int=4 , _snake_case : List[str]=37 , _snake_case : Any="gelu" , _snake_case : Tuple=0.1 , _snake_case : Tuple=0.1 , _snake_case : Union[str, Any]=5_12 , _snake_case : Tuple=16 , _snake_case : List[Any]=2 , _snake_case : List[str]=0.02 , _snake_case : List[str]=4 , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_attention_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_choices
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = None
if self.use_attention_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
A__ = None
if self.use_token_type_ids:
A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A__ = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _a ( self : int ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ , A__ = config_and_inputs
A__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def _a ( self : str ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ , A__ = config_and_inputs
A__ = True
A__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxBertModelTest( FlaxModelTesterMixin , unittest.TestCase ):
"""simple docstring"""
A__ : Optional[int] = True
A__ : Dict = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ):
        """simple docstring"""
        self.model_tester = FlaxBertModelTester(self )
@slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        model = FlaxBertModel.from_pretrained('bert-base-cased' )
        outputs = model(np.ones((1, 1) ) )
        self.assertIsNotNone(outputs )
| 52
|
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
"""simple docstring"""
def __init__( self : List[Any] , _snake_case : Any , _snake_case : Optional[int]=13 , _snake_case : Optional[Any]=64 , _snake_case : List[str]=2 , _snake_case : Any=3 , _snake_case : Union[str, Any]=True , _snake_case : Dict=True , _snake_case : int=32 , _snake_case : int=5 , _snake_case : Union[str, Any]=4 , _snake_case : int=37 , _snake_case : Tuple="gelu" , _snake_case : Optional[int]=0.1 , _snake_case : Dict=0.1 , _snake_case : List[str]=10 , _snake_case : Union[str, Any]=0.02 , _snake_case : Dict=[1, 16, 4, 4] , _snake_case : Dict=None , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = image_size
A__ = patch_size
A__ = num_channels
A__ = is_training
A__ = use_labels
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = type_sequence_label_size
A__ = initializer_range
A__ = scope
A__ = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
A__ = (self.image_size // 32) ** 2
A__ = num_patches + 1
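        # e.g. with the defaults above (image_size=64): (64 // 32) ** 2 = 4 patches, so seq_length = 4 + 1 = 5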
def _a ( self : Any ):
"""simple docstring"""
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = self.get_config()
return config, pixel_values, labels
def _a ( self : Tuple ):
"""simple docstring"""
A__ = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_snake_case , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=_snake_case , )
def _a ( self : int , _snake_case : Optional[int] , _snake_case : Union[str, Any] , _snake_case : Optional[int] ):
"""simple docstring"""
A__ = ViTHybridModel(config=_snake_case )
model.to(_snake_case )
model.eval()
A__ = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : List[str] , _snake_case : str , _snake_case : Union[str, Any] , _snake_case : Any ):
"""simple docstring"""
A__ = self.type_sequence_label_size
A__ = ViTHybridForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
A__ = model(_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self : Dict ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class ViTHybridModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
A__ : Union[str, Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
A__ : str = (
{"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
A__ : Union[str, Any] = False
A__ : Any = False
A__ : Union[str, Any] = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = ViTHybridModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTHybridConfig , has_text_modality=False , hidden_size=37 )
def _a ( self : int ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def _a ( self : int ):
"""simple docstring"""
pass
def _a ( self : int ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(_snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case , nn.Linear ) )
def _a ( self : List[str] ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(_snake_case )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _snake_case )
def _a ( self : Any ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def _a ( self : str ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
def _a ( self : Any ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = _config_zero_init(_snake_case )
for model_class in self.all_model_classes:
A__ = model_class(config=_snake_case )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
A__ = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def _a ( self : int ):
"""simple docstring"""
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def A ( ) -> Union[str, Any]:
A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : Tuple ):
"""simple docstring"""
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _a ( self : Optional[Any] ):
"""simple docstring"""
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@slow
@require_accelerate
def _a ( self : List[Any] ):
"""simple docstring"""
        image_processor = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
        model = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' )
        outputs = model(**inputs )
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx] , 'tabby, tabby cat' )
| 52
| 1
|
from cv2 import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative( img ):
    # get the image dimensions (height and width)
    pixel_h , pixel_v = img.shape[0], img.shape[1]
    # convert each pixel's color to its negative
    for i in range(pixel_h ):
        for j in range(pixel_v ):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
if __name__ == "__main__":
# read original image
    img = imread('''image_data/lena.jpg''', 1)
# convert to its negative
    img = convert_to_negative(img)
# show result image
imshow('''negative of original image''', img)
waitKey(0)
destroyAllWindows()
| 52
|
def is_even( number : int ) -> bool:
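    """
    Return True when ``number`` is even, by checking its lowest binary bit.

    >>> is_even(10)
    True
    >>> is_even(7)
    False
    >>> is_even(0)
    True
    """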
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 52
| 1
|
from __future__ import annotations
graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
class Graph:
"""simple docstring"""
    def __init__( self , graph : dict[str, list[str]] , source_vertex : str ):
        """simple docstring"""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent = {}
        self.source_vertex = source_vertex
    def breadth_first_search( self ):
"""simple docstring"""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0 )
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex )
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex )
    def shortest_path( self , target_vertex : str ):
        """simple docstring"""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex )
        if target_vertex_parent is None:
            msg = (
                F'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}'''
            )
            raise ValueError(msg )
        return self.shortest_path(target_vertex_parent ) + F'''->{target_vertex}'''
if __name__ == "__main__":
    g = Graph(graph, '''G''')
    g.breadth_first_search()
print(g.shortest_path('''D'''))
print(g.shortest_path('''G'''))
print(g.shortest_path('''Foo'''))
| 52
|
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline( Pipeline ):
"""simple docstring"""
    def _sanitize_parameters( self , truncation=None , tokenize_kwargs=None , return_tensors=None , **kwargs ):
        """simple docstring"""
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' )
            tokenize_kwargs['truncation'] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params['return_tensors'] = return_tensors
        return preprocess_params, {}, postprocess_params
    def preprocess( self , inputs , **tokenize_kwargs ):
        """simple docstring"""
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs , return_tensors=return_tensors , **tokenize_kwargs )
        return model_inputs
    def _forward( self , model_inputs ):
        """simple docstring"""
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , return_tensors=False ):
"""simple docstring"""
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
    def __call__( self , *args , **kwargs ):
        """simple docstring"""
        return super().__call__(*args , **kwargs )
| 52
| 1
|
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i ) -> int:  # picklable for multiprocessing
    return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend() -> None:
with parallel_backend('spark' ):
assert ParallelBackendConfig.backend_name == "spark"
    lst = [1, 2, 3]
    with pytest.raises(ValueError ):
        with parallel_backend('unsupported backend' ):
            map_nested(add_one , lst , num_proc=2 )
    with pytest.raises(ValueError ):
        with parallel_backend('unsupported backend' ):
            map_nested(add_one , lst , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('num_proc' , [2, -1] )
def test_parallel_backend_map_nested(num_proc ) -> None:
    s1 = [1, 2]
    s2 = {'a': 1, 'b': 2}
    s3 = {'a': [1, 2], 'b': [3, 4]}
    s4 = {'a': {'1': 1}, 'b': 2}
    s5 = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {'a': 2, 'b': 3}
    expected_map_nested_s3 = {'a': [2, 3], 'b': [4, 5]}
    expected_map_nested_s4 = {'a': {'1': 2}, 'b': 3}
    expected_map_nested_s5 = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
    with parallel_backend('spark' ):
        assert map_nested(add_one , s1 , num_proc=num_proc ) == expected_map_nested_s1
        assert map_nested(add_one , s2 , num_proc=num_proc ) == expected_map_nested_s2
        assert map_nested(add_one , s3 , num_proc=num_proc ) == expected_map_nested_s3
        assert map_nested(add_one , s4 , num_proc=num_proc ) == expected_map_nested_s4
        assert map_nested(add_one , s5 , num_proc=num_proc ) == expected_map_nested_s5
| 52
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
def simple_accuracy(preds , labels ) -> float:
    return (preds == labels).mean()
@dataclass
class ModelArguments:
"""simple docstring"""
A__ : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
A__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
A__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
A__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class DataTrainingArguments:
"""simple docstring"""
A__ : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
A__ : str = field(metadata={"help": "Should contain the data files for the task."} )
A__ : int = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
A__ : bool = field(
default=UpperCAmelCase_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main() -> Dict:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s' , training_args )
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
    except KeyError:
        raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , 'eval_results.txt' )
        if trainer.is_world_master():
            with open(output_eval_file , 'w' ) as writer:
                logger.info('***** Eval results *****' )
                for key, value in result.items():
                    logger.info('  %s = %s' , key , value )
                    writer.write('%s = %s\n' % (key, value) )
            results.update(result )
    return results
def _mp_fn(index ) -> None:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 52
| 1
|
from __future__ import annotations
from typing import Any
class Matrix:
    """simple docstring"""
    def __init__( self , row : int , column : int , default_value : float = 0 ):
        """simple docstring"""
        self.row , self.column = row, column
        self.array = [[default_value for c in range(column )] for r in range(row )]
    def __str__( self ):
        """simple docstring"""
        s = F'''Matrix consist of {self.row} rows and {self.column} columns\n'''
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length , len(str(obj ) ) )
        string_format_identifier = F'''%{max_element_length}s'''
        # Make string and return
        def single_line(row_vector : list[float] ) -> str:
            nonlocal string_format_identifier
            line = '['
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
            line += "]"
            return line
        s += "\n".join(single_line(row_vector ) for row_vector in self.array )
        return s
def __repr__( self : Union[str, Any] ):
"""simple docstring"""
return str(self )
    def validate_indices( self , loc : tuple[int, int] ):
        """simple docstring"""
        if not (isinstance(loc , (list, tuple) ) and len(loc ) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True
    def __getitem__( self , loc : tuple[int, int] ):
        """simple docstring"""
        assert self.validate_indices(loc )
        return self.array[loc[0]][loc[1]]
    def __setitem__( self , loc : tuple[int, int] , value : float ):
        """simple docstring"""
        assert self.validate_indices(loc )
        self.array[loc[0]][loc[1]] = value
    def __add__( self , another : Matrix ):
        """simple docstring"""
        assert isinstance(another , Matrix )
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = self[r, c] + another[r, c]
        return result
    def __neg__( self ):
        """simple docstring"""
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = -self[r, c]
        return result
    def __sub__( self , another : Matrix ):
        """simple docstring"""
        return self + (-another)
    def __mul__( self , another : int | float | Matrix ):
        """simple docstring"""
        if isinstance(another , (int, float) ):  # Scalar multiplication
            result = Matrix(self.row , self.column )
            for r in range(self.row ):
                for c in range(self.column ):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another , Matrix ):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row , another.column )
            for r in range(self.row ):
                for c in range(another.column ):
                    for i in range(self.column ):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = F'''Unsupported type given for another ({type(another )})'''
            raise TypeError(msg )
    def transpose( self ):
        """simple docstring"""
        result = Matrix(self.column , self.row )
        for r in range(self.row ):
            for c in range(self.column ):
                result[c, r] = self[r, c]
        return result
    def sherman_morrison( self , u : Matrix , v : Matrix ):
"""simple docstring"""
        assert isinstance(u , Matrix ) and isinstance(v , Matrix )
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
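        # Sherman-Morrison formula: assuming this matrix already holds A^(-1),
        # (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)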
# Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3 , 3 , 0 )
        for i in range(3 ):
            ainv[i, i] = 1
        print(f'''a^(-1) is {ainv}''' )
        # u, v
        u = Matrix(3 , 1 , 0 )
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3 , 1 , 0 )
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f'''u is {u}''' )
        print(f'''v is {v}''' )
        print(f'''uv^T is {u * v.transpose()}''' )
        # Sherman Morrison
        print(f'''(a + uv^T)^(-1) is {ainv.sherman_morrison(u , v )}''' )
    def test2() -> None:
        import doctest
        doctest.testmod()
    test1()
| 52
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 52
| 1
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
LABEL_DIR = ''''''
IMAGE_DIR = ''''''
OUTPUT_DIR = ''''''
FLIP_TYPE = 1 # (0 is vertical, 1 is horizontal)
def main() -> None:
    img_paths , annos = get_dataset(LABEL_DIR , IMAGE_DIR )
    print('Processing...' )
    new_images , new_annos , paths = update_image_and_anno(img_paths , annos , FLIP_TYPE )
    for index, image in enumerate(new_images ):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32 )
        file_name = paths[index].split(os.sep )[-1].rsplit('.' , 1 )[0]
        file_root = f'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
        cv2.imwrite(f'''/{file_root}.jpg''' , image , [cv2.IMWRITE_JPEG_QUALITY, 85] )
        print(f'''Success {index+1}/{len(new_images )} with {file_name}''' )
        annos_list = []
        for anno in new_annos[index]:
            obj = f'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
            annos_list.append(obj )
        with open(f'''/{file_root}.txt''' , 'w' ) as outfile:
            outfile.write('\n'.join(line for line in annos_list ) )
def get_dataset(label_dir , img_dir ) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir , '*.txt' ) ):
        label_name = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir , f'''{label_name}.jpg''' )
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('\n' ).split(' ' )
            boxes.append(
                [
                    int(obj[0] ),
                    float(obj[1] ),
                    float(obj[2] ),
                    float(obj[3] ),
                    float(obj[4] ),
                ] )
        if not boxes:
            continue
        img_paths.append(img_path )
        labels.append(boxes )
    return img_paths, labels
def update_image_and_anno(img_list , anno_list , flip_type = 1 ) -> tuple[list, list, list]:
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list ) ):
        new_annos = []
        path = img_list[idx]
        path_list.append(path )
        img_annos = anno_list[idx]
        img = cv2.imread(path )
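        # annotations are in normalized YOLO format (class, x_center, y_center, width, height), so a
        # horizontal flip maps x_center -> 1 - x_center and a vertical flip maps y_center -> 1 - y_center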
        if flip_type == 1:
            new_img = cv2.flip(img , flip_type )
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
        elif flip_type == 0:
            new_img = cv2.flip(img , flip_type )
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
        new_annos_lists.append(new_annos )
        new_imgs_list.append(new_img )
    return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char = 32 ) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 52
|
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
SCREAMING_SNAKE_CASE__ = f'https://www.google.com/search?q={query}&num=100'
    res = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
        link = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
        link = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
| 52
| 1
|
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : str , _snake_case : Union[str, "sqlalchemy.sql.Selectable"] , _snake_case : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , _snake_case : Optional[Features] = None , _snake_case : str = None , _snake_case : bool = False , **_snake_case : Optional[Any] , ):
"""simple docstring"""
super().__init__(features=_snake_case , cache_dir=_snake_case , keep_in_memory=_snake_case , **_snake_case )
A__ = Sql(
cache_dir=_snake_case , features=_snake_case , sql=_snake_case , con=_snake_case , **_snake_case , )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = None
A__ = None
A__ = None
A__ = None
self.builder.download_and_prepare(
download_config=_snake_case , download_mode=_snake_case , verification_mode=_snake_case , base_path=_snake_case , )
# Build dataset for splits
A__ = self.builder.as_dataset(
split='train' , verification_mode=_snake_case , in_memory=self.keep_in_memory )
return dataset
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] , _snake_case : Dataset , _snake_case : str , _snake_case : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , _snake_case : Optional[int] = None , _snake_case : Optional[int] = None , **_snake_case : Any , ):
"""simple docstring"""
if num_proc is not None and num_proc <= 0:
raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''' )
A__ = dataset
A__ = name
A__ = con
A__ = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
A__ = num_proc
A__ = to_sql_kwargs
def _a ( self : Any ):
"""simple docstring"""
A__ = self.to_sql_kwargs.pop('sql' , _snake_case )
A__ = self.to_sql_kwargs.pop('con' , _snake_case )
A__ = self.to_sql_kwargs.pop('index' , _snake_case )
A__ = self._write(index=_snake_case , **self.to_sql_kwargs )
return written
def _a ( self : Any , _snake_case : Union[str, Any] ):
"""simple docstring"""
A__ , A__ , A__ = args
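        # every batch after the first must append to the table instead of replacing it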
A__ = {**to_sql_kwargs, 'if_exists': 'append'} if offset > 0 else to_sql_kwargs
A__ = query_table(
table=self.dataset.data , key=slice(_snake_case , offset + self.batch_size ) , indices=self.dataset._indices , )
A__ = batch.to_pandas()
A__ = df.to_sql(self.name , self.con , index=_snake_case , **_snake_case )
return num_rows or len(_snake_case )
def _a ( self : Optional[int] , _snake_case : Dict , **_snake_case : Dict ):
"""simple docstring"""
A__ = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating SQL from Arrow format' , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
A__ , A__ = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _snake_case , _snake_case )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating SQL from Arrow format' , ):
written += num_rows
return written
| 52
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
def _a ( self : Any ):
"""simple docstring"""
return self._get_dummy_components()
def _a ( self : Optional[int] , _snake_case : Any , _snake_case : str=0 ):
"""simple docstring"""
if str(_snake_case ).startswith('mps' ):
A__ = torch.manual_seed(_snake_case )
else:
A__ = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
A__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case )
A__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case )
A__ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _a ( self : Dict ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def _a ( self : int ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def _a ( self : Optional[int] ):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def _a ( self : List[str] ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def _a ( self : Dict ):
"""simple docstring"""
self._test_save_load_local()
def _a ( self : Optional[int] ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 52
| 1
|
import numpy as np
from transformers import Pipeline
def softmax(outputs ):
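    # subtract the per-row max before exponentiating: softmax(x) == softmax(x - max(x)),
    # which avoids overflow for large logits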
    maxes = np.max(outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
class PairClassificationPipeline( Pipeline ):
"""simple docstring"""
    def _sanitize_parameters( self , **kwargs ):
        """simple docstring"""
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs['second_text'] = kwargs['second_text']
        return preprocess_kwargs, {}, {}
    def preprocess( self , text , second_text=None ):
        """simple docstring"""
        return self.tokenizer(text , text_pair=second_text , return_tensors=self.framework )
    def _forward( self , model_inputs ):
        """simple docstring"""
        return self.model(**model_inputs )
    def postprocess( self , model_outputs ):
        """simple docstring"""
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits )
        best_class = np.argmax(probabilities )
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 52
|
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
SCREAMING_SNAKE_CASE__ = get_logger(__name__)
SCREAMING_SNAKE_CASE__ = r'''
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
'''
class __lowerCAmelCase :
"""simple docstring"""
@add_start_docstrings(_snake_case )
def __call__( self : Optional[int] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray ):
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class __lowerCAmelCase :
"""simple docstring"""
@add_start_docstrings(_snake_case )
def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray ):
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
@add_start_docstrings(_snake_case )
def __call__( self : Any , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int , **_snake_case : Optional[int] ):
"""simple docstring"""
for processor in self:
A__ = inspect.signature(processor.__call__ ).parameters
if len(_snake_case ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
F'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
F'''{processor.__class__} are passed to the logits processor.''' )
A__ = processor(_snake_case , _snake_case , _snake_case , **_snake_case )
else:
A__ = processor(_snake_case , _snake_case , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Any , _snake_case : float ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or not (temperature > 0):
raise ValueError(F'''`temperature` has to be a strictly positive float, but is {temperature}''' )
A__ = temperature
def __call__( self : str , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = scores / self.temperature
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Optional[Any] , _snake_case : float , _snake_case : float = -float('Inf' ) , _snake_case : int = 1 ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or (top_p < 0 or top_p > 1.0):
raise ValueError(F'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
if not isinstance(_snake_case , _snake_case ) or (min_tokens_to_keep < 1):
raise ValueError(F'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
A__ = top_p
A__ = filter_value
A__ = min_tokens_to_keep
def __call__( self : str , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ , A__ = lax.top_k(_snake_case , scores.shape[-1] )
A__ = jnp.full_like(_snake_case , self.filter_value )
A__ = jax.nn.softmax(_snake_case , axis=-1 ).cumsum(axis=-1 )
A__ = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
A__ = jnp.roll(_snake_case , 1 )
score_mask |= score_mask.at[:, 0].set(_snake_case )
# min tokens to keep
A__ = score_mask.at[:, : self.min_tokens_to_keep].set(_snake_case )
A__ = jnp.where(_snake_case , _snake_case , _snake_case )
A__ = jax.lax.sort_key_val(_snake_case , _snake_case )[-1]
return next_scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , _snake_case : int , _snake_case : float = -float('Inf' ) , _snake_case : int = 1 ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or top_k <= 0:
raise ValueError(F'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
A__ = max(_snake_case , _snake_case )
A__ = filter_value
def __call__( self : Optional[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ , A__ = scores.shape
A__ = jnp.full(batch_size * vocab_size , self.filter_value )
A__ = min(self.top_k , scores.shape[-1] ) # Safety check
A__ , A__ = lax.top_k(_snake_case , _snake_case )
A__ = jnp.broadcast_to((jnp.arange(_snake_case ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
A__ = topk_scores.flatten()
A__ = topk_indices.flatten() + shift
A__ = next_scores_flat.at[topk_indices_flat].set(_snake_case )
A__ = next_scores_flat.reshape(_snake_case , _snake_case )
return next_scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Any , _snake_case : int ):
"""simple docstring"""
A__ = bos_token_id
def __call__( self : Optional[int] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = jnp.full(scores.shape , -float('inf' ) )
A__ = 1 - jnp.bool_(cur_len - 1 )
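        # the mask above is 1 exactly when cur_len == 1, an XLA-friendly way to force BOS only at the first step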
A__ = jnp.where(_snake_case , new_scores.at[:, self.bos_token_id].set(0 ) , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Any , _snake_case : int , _snake_case : int ):
"""simple docstring"""
A__ = max_length
A__ = eos_token_id
def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = jnp.full(scores.shape , -float('inf' ) )
A__ = 1 - jnp.bool_(cur_len - self.max_length + 1 )
A__ = jnp.where(_snake_case , new_scores.at[:, self.eos_token_id].set(0 ) , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Dict , _snake_case : int , _snake_case : int ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or min_length < 0:
raise ValueError(F'''`min_length` has to be a positive integer, but is {min_length}''' )
if not isinstance(_snake_case , _snake_case ) or eos_token_id < 0:
raise ValueError(F'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
A__ = min_length
A__ = eos_token_id
def __call__( self : int , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
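        # the mask above stays 1 while cur_len < min_length, keeping EOS suppressed until enough tokens were generated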
A__ = jnp.where(_snake_case , scores.at[:, self.eos_token_id].set(-float('inf' ) ) , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : int , _snake_case : Tuple , _snake_case : Union[str, Any] ):
"""simple docstring"""
A__ = list(_snake_case )
A__ = begin_index
def __call__( self : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : str , _snake_case : int ):
"""simple docstring"""
A__ = 1 - jnp.bool_(cur_len - self.begin_index )
A__ = jnp.where(_snake_case , scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : int , _snake_case : list ):
"""simple docstring"""
A__ = list(_snake_case )
def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = scores.at[..., self.suppress_tokens].set(-float('inf' ) )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : List[str] , _snake_case : Optional[Any] ):
"""simple docstring"""
A__ = dict(_snake_case )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
A__ = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
A__ = force_token_array.at[index].set(_snake_case )
A__ = jnp.intaa(_snake_case )
def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
def _force_token(_snake_case : Dict ):
A__ = scores.shape[0]
A__ = self.force_token_array[generation_idx]
A__ = jnp.ones_like(_snake_case , dtype=scores.dtype ) * -float('inf' )
A__ = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
A__ = lax.dynamic_update_slice(_snake_case , _snake_case , (0, current_token) )
return new_scores
A__ = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(_snake_case ) , lambda: scores , ) , )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : List[Any] ):
"""simple docstring"""
A__ = generate_config.eos_token_id
A__ = generate_config.no_timestamps_token_id
A__ = generate_config.no_timestamps_token_id + 1
A__ = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(_snake_case , 'max_initial_timestamp_index' ):
A__ = generate_config.max_initial_timestamp_index
else:
A__ = model_config.vocab_size
if self.max_initial_timestamp_index is None:
A__ = model_config.vocab_size
def __call__( self : Tuple , _snake_case : List[Any] , _snake_case : Dict , _snake_case : Dict ):
"""simple docstring"""
A__ = scores.at[:, self.no_timestamps_token_id].set(-float('inf' ) )
def handle_pairs(_snake_case : Dict , _snake_case : str ):
A__ = jnp.where((cur_len - self.begin_index) >= 1 , _snake_case , _snake_case )
A__ = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , _snake_case , )
A__ = jnp.where((cur_len - self.begin_index) < 2 , _snake_case , _snake_case )
A__ = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , _snake_case , _snake_case , )
return jnp.where(
_snake_case , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('inf' ) ) , scores_k.at[: self.eos_token_id].set(-float('inf' ) ) , ) , _snake_case , )
A__ = jax.vmap(_snake_case )(_snake_case , _snake_case )
A__ = jnp.where(cur_len == self.begin_index , _snake_case , _snake_case )
A__ = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , _snake_case , )
A__ = self.timestamp_begin + self.max_initial_timestamp_index
A__ = jnp.where(
_snake_case , scores.at[:, last_allowed + 1 :].set(-float('inf' ) ) , _snake_case , )
# if sum of probability over timestamps is above any other token, sample timestamp
A__ = jax.nn.log_softmax(_snake_case , axis=-1 )
def handle_cumulative_probs(_snake_case : List[Any] , _snake_case : Union[str, Any] ):
A__ = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
A__ = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('inf' ) ) , _snake_case , )
A__ = jax.vmap(_snake_case )(_snake_case , _snake_case )
return scores
| 52
| 1
|
from manim import *
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = Rectangle(height=0.5 , width=0.5 )
A__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
A__ = [mem.copy() for i in range(6 )]
A__ = [mem.copy() for i in range(6 )]
A__ = VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
A__ = VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
A__ = VGroup(_snake_case , _snake_case ).arrange(_snake_case , buff=0 )
A__ = Text('CPU' , font_size=24 )
A__ = Group(_snake_case , _snake_case ).arrange(_snake_case , buff=0.5 , aligned_edge=_snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_snake_case )
A__ = [mem.copy() for i in range(1 )]
A__ = VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
A__ = Text('GPU' , font_size=24 )
A__ = Group(_snake_case , _snake_case ).arrange(_snake_case , buff=0.5 , aligned_edge=_snake_case )
gpu.align_to(_snake_case , _snake_case )
gpu.set_x(gpu.get_x() - 1 )
self.add(_snake_case )
A__ = [mem.copy() for i in range(6 )]
A__ = VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
A__ = Text('Model' , font_size=24 )
A__ = Group(_snake_case , _snake_case ).arrange(_snake_case , buff=0.5 , aligned_edge=_snake_case )
model.move_to([3, -1.0, 0] )
self.play(
Create(_snake_case , run_time=1 ) , Create(_snake_case , run_time=1 ) , Create(_snake_case , run_time=1 ) , )
A__ = MarkupText(
F'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
A__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
A__ = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(_snake_case , run_time=2.5 ) , Write(_snake_case ) , Write(_snake_case ) )
self.add(_snake_case )
A__ = []
A__ = []
A__ = []
for i, rect in enumerate(_snake_case ):
A__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_snake_case , opacity=0.7 )
cpu_target.move_to(_snake_case )
cpu_target.generate_target()
A__ = 0.46 / 4
A__ = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_snake_case )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=_snake_case , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=_snake_case , buff=0.0 )
cpu_targs.append(_snake_case )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_snake_case ) )
second_animations.append(MoveToTarget(_snake_case , run_time=1.5 ) )
self.play(*_snake_case )
self.play(*_snake_case )
self.wait()
| 52
|
import argparse
import struct
import unittest
class SHA256:
"""simple docstring"""
    def __init__( self , data : bytes ):
        """simple docstring"""
        self.data = data
        # Initialize hash values
        self.hashes = [
0x6A09E667,
0xBB67AE85,
0x3C6EF372,
0xA54FF53A,
0x510E527F,
0x9B05688C,
0x1F83D9AB,
0x5BE0CD19,
]
# Initialize round constants
        self.round_constants = [
0x428A2F98,
0x71374491,
0xB5C0FBCF,
0xE9B5DBA5,
0x3956C25B,
0x59F111F1,
0x923F82A4,
0xAB1C5ED5,
0xD807AA98,
0x12835B01,
0x243185BE,
0x550C7DC3,
0x72BE5D74,
0x80DEB1FE,
0x9BDC06A7,
0xC19BF174,
0xE49B69C1,
0xEFBE4786,
0x0FC19DC6,
0x240CA1CC,
0x2DE92C6F,
0x4A7484AA,
0x5CB0A9DC,
0x76F988DA,
0x983E5152,
0xA831C66D,
0xB00327C8,
0xBF597FC7,
0xC6E00BF3,
0xD5A79147,
0x06CA6351,
0x14292967,
0x27B70A85,
0x2E1B2138,
0x4D2C6DFC,
0x53380D13,
0x650A7354,
0x766A0ABB,
0x81C2C92E,
0x92722C85,
0xA2BFE8A1,
0xA81A664B,
0xC24B8B70,
0xC76C51A3,
0xD192E819,
0xD6990624,
0xF40E3585,
0x106AA070,
0x19A4C116,
0x1E376C08,
0x2748774C,
0x34B0BCB5,
0x391C0CB3,
0x4ED8AA4A,
0x5B9CCA4F,
0x682E6FF3,
0x748F82EE,
0x78A5636F,
0x84C87814,
0x8CC70208,
0x90BEFFFA,
0xA4506CEB,
0xBEF9A3F7,
0xC67178F2,
]
        self.preprocessed_data = self.preprocessing(self.data )
self.final_hash()
@staticmethod
    def preprocessing( data : bytes ):
        """simple docstring"""
        padding = B'\x80' + (B'\x00' * (63 - (len(data ) + 8) % 64))
        big_endian_integer = struct.pack('>Q' , (len(data ) * 8) )
        return data + padding + big_endian_integer
    def final_hash( self : Optional[int] ):
"""simple docstring"""
        self.blocks = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
            words = list(struct.unpack('>16L' , block ) )
# add 48 0-ed integers
words += [0] * 48
            a , b , c , d , e , f , g , h = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15] , 7 )
                        ^ self.ror(words[index - 15] , 18 )
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2] , 17 )
                        ^ self.ror(words[index - 2] , 19 )
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000
# Compression
                s1 = self.ror(e , 6 ) ^ self.ror(e , 11 ) ^ self.ror(e , 25 )
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a , 2 ) ^ self.ror(a , 13 ) ^ self.ror(a , 22 )
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000
                h , g , f , e , d , c , b , a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
# Modify final values
            self.hashes = [
((element + mutated_hash_values[index]) % 0x100000000)
for index, element in enumerate(self.hashes )
]
        self.hash = ''.join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
    def ror( self : Dict , value : int , rotations : int ):
        """simple docstring"""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
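# Quick sanity check of the 32-bit right-rotation used throughout the compression
# loop above. This is an illustrative standalone sketch: the helper name ror32 and
# the test values are not part of the original module.
def ror32(value: int, rotations: int) -> int:
    # rotate `value` right by `rotations` bits within a 32-bit word
    return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
assert ror32(0x00000001 , 1 ) == 0x80000000  # low bit wraps to the top
assert ror32(0x12345678 , 8 ) == 0x78123456  # whole bytes rotate cleanly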
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : str ):
"""simple docstring"""
import hashlib
        input_data = bytes('Test String' , 'utf-8' )
        self.assertEqual(SHAaaa(input_data ).hash , hashlib.sha256(input_data ).hexdigest() )
def main( ) -> None:
    import doctest
    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
    parser.add_argument(
        '-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , 'rb' ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string , 'utf-8' )
    print(SHAaaa(hash_input ).hash )
if __name__ == "__main__":
main()
| 52
| 1
|
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : Dict = ["pixel_values"]
def __init__( self : List[str] , _snake_case : bool = True , _snake_case : Dict[str, int] = None , _snake_case : PILImageResampling = PILImageResampling.BICUBIC , _snake_case : bool = True , _snake_case : Dict[str, int] = None , _snake_case : bool = True , _snake_case : Union[int, float] = 1 / 2_55 , _snake_case : bool = True , _snake_case : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , _snake_case : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **_snake_case : Any , ):
"""simple docstring"""
super().__init__(**_snake_case )
A__ = size if size is not None else {'shortest_edge': 2_24}
A__ = get_size_dict(_snake_case , default_to_square=_snake_case )
A__ = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
A__ = get_size_dict(_snake_case , param_name='crop_size' )
A__ = do_resize
A__ = size
A__ = resample
A__ = do_center_crop
A__ = crop_size
A__ = do_rescale
A__ = rescale_factor
A__ = do_normalize
A__ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
A__ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def _a ( self : str , _snake_case : np.ndarray , _snake_case : Dict[str, int] , _snake_case : PILImageResampling = PILImageResampling.BICUBIC , _snake_case : Optional[Union[str, ChannelDimension]] = None , **_snake_case : List[Any] , ):
"""simple docstring"""
A__ = get_size_dict(_snake_case , default_to_square=_snake_case )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
A__ = int((2_56 / 2_24) * size['shortest_edge'] )
A__ = get_resize_output_image_size(_snake_case , size=_snake_case , default_to_square=_snake_case )
A__ = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''' )
return resize(
_snake_case , size=(size_dict['height'], size_dict['width']) , resample=_snake_case , data_format=_snake_case , **_snake_case )
def _a ( self : List[str] , _snake_case : np.ndarray , _snake_case : Dict[str, int] , _snake_case : Optional[Union[str, ChannelDimension]] = None , **_snake_case : Optional[int] , ):
"""simple docstring"""
A__ = get_size_dict(_snake_case )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size dict must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return center_crop(_snake_case , size=(size['height'], size['width']) , data_format=_snake_case , **_snake_case )
def _a ( self : List[Any] , _snake_case : np.ndarray , _snake_case : Union[int, float] , _snake_case : Optional[Union[str, ChannelDimension]] = None , **_snake_case : Any , ):
"""simple docstring"""
return rescale(_snake_case , scale=_snake_case , data_format=_snake_case , **_snake_case )
def _a ( self : Optional[Any] , _snake_case : np.ndarray , _snake_case : Union[float, List[float]] , _snake_case : Union[float, List[float]] , _snake_case : Optional[Union[str, ChannelDimension]] = None , **_snake_case : Union[str, Any] , ):
"""simple docstring"""
return normalize(_snake_case , mean=_snake_case , std=_snake_case , data_format=_snake_case , **_snake_case )
def _a ( self : Tuple , _snake_case : ImageInput , _snake_case : Optional[bool] = None , _snake_case : Optional[Dict[str, int]] = None , _snake_case : PILImageResampling = None , _snake_case : Optional[bool] = None , _snake_case : Optional[Dict[str, int]] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[float] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[Union[float, Iterable[float]]] = None , _snake_case : Optional[Union[float, Iterable[float]]] = None , _snake_case : Optional[TensorType] = None , _snake_case : ChannelDimension = ChannelDimension.FIRST , **_snake_case : Any , ):
"""simple docstring"""
A__ = do_resize if do_resize is not None else self.do_resize
A__ = resample if resample is not None else self.resample
A__ = do_center_crop if do_center_crop is not None else self.do_center_crop
A__ = do_rescale if do_rescale is not None else self.do_rescale
A__ = rescale_factor if rescale_factor is not None else self.rescale_factor
A__ = do_normalize if do_normalize is not None else self.do_normalize
A__ = image_mean if image_mean is not None else self.image_mean
A__ = image_std if image_std is not None else self.image_std
A__ = size if size is not None else self.size
A__ = get_size_dict(_snake_case , default_to_square=_snake_case )
A__ = crop_size if crop_size is not None else self.crop_size
A__ = get_size_dict(_snake_case , param_name='crop_size' )
A__ = make_list_of_images(_snake_case )
if not valid_images(_snake_case ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
A__ = [to_numpy_array(_snake_case ) for image in images]
if do_resize:
A__ = [self.resize(_snake_case , _snake_case , _snake_case ) for image in images]
if do_center_crop:
A__ = [self.center_crop(_snake_case , _snake_case ) for image in images]
if do_rescale:
A__ = [self.rescale(_snake_case , _snake_case ) for image in images]
if do_normalize:
A__ = [self.normalize(_snake_case , _snake_case , _snake_case ) for image in images]
A__ = [to_channel_dimension_format(_snake_case , _snake_case ) for image in images]
A__ = {'pixel_values': images}
return BatchFeature(data=_snake_case , tensor_type=_snake_case )
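# Illustrative sketch (not part of the processor above; the helper name is
# hypothetical): with size = {"shortest_edge": 224}, the resize step first scales
# the shorter image side to int((256 / 224) * 224) = 256 and the longer side
# proportionally, and only then does the 224x224 center crop take its window.
def _shortest_edge_output_size(height: int, width: int, shortest_edge: int = 2_24):
    target = int((2_56 / 2_24) * shortest_edge)
    scale = target / min(height, width)
    return int(round(height * scale)), int(round(width * scale))
# e.g. a 480x640 input resizes to (256, 341) before the 224x224 center crop:
# _shortest_edge_output_size(4_80, 6_40) -> (256, 341)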
| 52
|
import math
import random
def sigmoid_function( value , deriv = False ) -> float:
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value ))
# Initial Value
INITIAL_VALUE = 0.02
def forward_propagation( expected , number_propagations ) -> float:
    weight = float(2 * (random.randint(1 , 100 )) - 1 )
    for _ in range(number_propagations ):
        # Forward propagation
        layer_a = sigmoid_function(INITIAL_VALUE * weight )
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_a
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_a , deriv=True )
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_a * 100
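# Numeric sanity check (illustrative, not part of the original script):
# sigmoid(0) = 0.5, and since the deriv branch expects an already-activated
# value, sigmoid(0.5, deriv=True) = 0.5 * (1 - 0.5) = 0.25, the slope at x = 0.
assert abs(sigmoid_function(0 ) - 0.5 ) < 1E-9
assert abs(sigmoid_function(0.5 , deriv=True ) - 0.25 ) < 1E-9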
if __name__ == "__main__":
import doctest
doctest.testmod()
    expected = int(input('''Expected value: '''))
    number_propagations = int(input('''Number of propagations: '''))
print(forward_propagation(expected, number_propagations))
| 52
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE__ = {
'''configuration_bridgetower''': [
'''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BridgeTowerConfig''',
'''BridgeTowerTextConfig''',
'''BridgeTowerVisionConfig''',
],
'''processing_bridgetower''': ['''BridgeTowerProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ['''BridgeTowerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BridgeTowerForContrastiveLearning''',
'''BridgeTowerForImageAndTextRetrieval''',
'''BridgeTowerForMaskedLM''',
'''BridgeTowerModel''',
'''BridgeTowerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 52
|
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self : int ):
"""simple docstring"""
        model = FlaxMTaForConditionalGeneration.from_pretrained('google/mt5-small' )
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small' )
        input_ids = tokenizer('Hello there' , return_tensors='np' ).input_ids
        labels = tokenizer('Hi I am' , return_tensors='np' ).input_ids
        decoder_input_ids = shift_tokens_right(labels , model.config.pad_token_id , model.config.decoder_start_token_id )
        logits = model(input_ids , decoder_input_ids=decoder_input_ids ).logits
        loss = optax.softmax_cross_entropy(logits , onehot(labels , logits.shape[-1] ) ).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 52
| 1
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : int , _snake_case : int , _snake_case : Any=3 , _snake_case : List[Any]=32 , _snake_case : Tuple=3 , _snake_case : Any=10 , _snake_case : str=[10, 20, 30, 40] , _snake_case : Dict=[1, 1, 2, 1] , _snake_case : List[Any]=True , _snake_case : Union[str, Any]=True , _snake_case : Any="relu" , _snake_case : Union[str, Any]=3 , _snake_case : Optional[int]=None , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = image_size
A__ = num_channels
A__ = embeddings_size
A__ = hidden_sizes
A__ = depths
A__ = is_training
A__ = use_labels
A__ = hidden_act
A__ = num_labels
A__ = scope
A__ = len(_snake_case )
def _a ( self : List[str] ):
"""simple docstring"""
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.num_labels )
A__ = self.get_config()
return config, pixel_values, labels
def _a ( self : Any ):
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def _a ( self : Any , _snake_case : List[str] , _snake_case : List[str] , _snake_case : List[str] ):
"""simple docstring"""
A__ = TFResNetModel(config=_snake_case )
A__ = model(_snake_case )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _a ( self : Any , _snake_case : Optional[int] , _snake_case : Tuple , _snake_case : List[Any] ):
"""simple docstring"""
A__ = self.num_labels
A__ = TFResNetForImageClassification(_snake_case )
A__ = model(_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self : Dict ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Dict = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
A__ : List[str] = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
A__ : Any = False
A__ : Dict = False
A__ : Optional[Any] = False
A__ : str = False
A__ : str = False
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = TFResNetModelTester(self )
A__ = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case )
def _a ( self : Dict ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self : Any ):
"""simple docstring"""
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def _a ( self : str ):
"""simple docstring"""
pass
def _a ( self : str ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(_snake_case )
A__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _snake_case )
def _a ( self : Dict ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def _a ( self : str ):
"""simple docstring"""
def check_hidden_states_output(_snake_case : Tuple , _snake_case : int , _snake_case : Optional[Any] ):
A__ = model_class(_snake_case )
A__ = model(**self._prepare_for_class(_snake_case , _snake_case ) )
A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A__ = self.model_tester.num_stages
self.assertEqual(len(_snake_case ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
A__ = layer_type
A__ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
def _a ( self : List[str] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def _a ( self : int ):
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = TFResNetModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def prepare_img( ) -> Optional[int]:
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : Dict ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _a ( self : int ):
"""simple docstring"""
A__ = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=_snake_case , return_tensors='tf' )
# forward pass
A__ = model(**_snake_case )
# verify the logits
A__ = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , _snake_case )
A__ = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _snake_case , atol=1E-4 ) )
| 52
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : List[str] = "roberta"
def __init__( self : List[str] , _snake_case : Union[str, Any]=5_02_65 , _snake_case : List[Any]=7_68 , _snake_case : List[str]=12 , _snake_case : List[str]=12 , _snake_case : Any=30_72 , _snake_case : Union[str, Any]="gelu" , _snake_case : int=0.1 , _snake_case : Union[str, Any]=0.1 , _snake_case : Tuple=5_12 , _snake_case : Union[str, Any]=2 , _snake_case : Any=0.02 , _snake_case : Any=1E-12 , _snake_case : List[Any]=1 , _snake_case : int=0 , _snake_case : Any=2 , _snake_case : Optional[Any]="absolute" , _snake_case : int=True , _snake_case : Any=None , **_snake_case : Any , ):
"""simple docstring"""
super().__init__(pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case )
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = initializer_range
A__ = layer_norm_eps
A__ = position_embedding_type
A__ = use_cache
A__ = classifier_dropout
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
@property
def _a ( self : Dict ):
"""simple docstring"""
if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 52
| 1
|
def or_gate( input_a , input_b ) -> int:
    return int((input_a, input_b).count(1 ) != 0 )
def test_or_gate( ) -> None:
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 52
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : int = LongformerTokenizer
A__ : Optional[int] = True
A__ : Any = LongformerTokenizerFast
A__ : Dict = True
def _a ( self : int ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A__ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
A__ = dict(zip(_snake_case , range(len(_snake_case ) ) ) )
A__ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
A__ = {'unk_token': '<unk>'}
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_snake_case ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_snake_case ) )
def _a ( self : int , **_snake_case : Union[str, Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_snake_case )
def _a ( self : Optional[int] , **_snake_case : List[Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_snake_case )
def _a ( self : Any , _snake_case : Optional[Any] ):
"""simple docstring"""
A__ = 'lower newer'
A__ = 'lower newer'
return input_text, output_text
def _a ( self : Any ):
"""simple docstring"""
A__ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
A__ = 'lower newer'
A__ = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
A__ = tokenizer.tokenize(_snake_case ) # , add_prefix_space=True)
self.assertListEqual(_snake_case , _snake_case )
A__ = tokens + [tokenizer.unk_token]
A__ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , _snake_case )
def _a ( self : List[str] ):
"""simple docstring"""
A__ = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=_snake_case ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=_snake_case ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
A__ = tokenizer.encode('sequence builders' , add_special_tokens=_snake_case )
A__ = tokenizer.encode('multi-sequence build' , add_special_tokens=_snake_case )
A__ = tokenizer.encode(
'sequence builders' , add_special_tokens=_snake_case , add_prefix_space=_snake_case )
A__ = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=_snake_case , add_prefix_space=_snake_case )
A__ = tokenizer.build_inputs_with_special_tokens(_snake_case )
A__ = tokenizer.build_inputs_with_special_tokens(_snake_case , _snake_case )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _a ( self : List[str] ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = 'Encode this sequence.'
A__ = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
A__ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case , add_prefix_space=_snake_case )
A__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(_snake_case , _snake_case )
A__ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case , add_prefix_space=_snake_case )
A__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(_snake_case , _snake_case )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
A__ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
A__ = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(_snake_case , _snake_case )
# Testing spaces after special tokens
A__ = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case )} ) # mask token has a left space
A__ = tokenizer.convert_tokens_to_ids(_snake_case )
A__ = 'Encode <mask> sequence'
A__ = 'Encode <mask>sequence'
A__ = tokenizer.encode(_snake_case )
A__ = encoded.index(_snake_case )
A__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(_snake_case , _snake_case )
A__ = tokenizer.encode(_snake_case )
A__ = encoded.index(_snake_case )
A__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(_snake_case , _snake_case )
def _a ( self : Dict ):
"""simple docstring"""
pass
def _a ( self : Union[str, Any] ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A__ = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case )
A__ = self.tokenizer_class.from_pretrained(_snake_case , **_snake_case )
A__ = 'A, <mask> AllenNLP sentence.'
A__ = tokenizer_r.encode_plus(_snake_case , add_special_tokens=_snake_case , return_token_type_ids=_snake_case )
A__ = tokenizer_p.encode_plus(_snake_case , add_special_tokens=_snake_case , return_token_type_ids=_snake_case )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
A__ = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
A__ = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
_snake_case , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
_snake_case , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def _a ( self : List[Any] ):
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
A__ = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
A__ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , _snake_case )
self.assertEqual(post_processor_state['add_prefix_space'] , _snake_case )
self.assertEqual(post_processor_state['trim_offsets'] , _snake_case )
def _a ( self : Optional[Any] ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A__ = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
A__ = F'''{text_of_1_token} {text_of_1_token}'''
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_snake_case ) + 1, len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_snake_case ) + 1, len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_snake_case ), len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_snake_case ), len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_snake_case ) + 1, 1 + len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_snake_case ), 1 + len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_snake_case ), 1 + len(_snake_case ) + 1 + len(_snake_case )) , )
| 52
| 1
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : Optional[Any] = "longformer"
def __init__( self : int , _snake_case : Union[List[int], int] = 5_12 , _snake_case : int = 2 , _snake_case : int = 1 , _snake_case : int = 0 , _snake_case : int = 2 , _snake_case : int = 3_05_22 , _snake_case : int = 7_68 , _snake_case : int = 12 , _snake_case : int = 12 , _snake_case : int = 30_72 , _snake_case : str = "gelu" , _snake_case : float = 0.1 , _snake_case : float = 0.1 , _snake_case : int = 5_12 , _snake_case : int = 2 , _snake_case : float = 0.02 , _snake_case : float = 1E-12 , _snake_case : bool = False , **_snake_case : str , ):
"""simple docstring"""
super().__init__(pad_token_id=_snake_case , **_snake_case )
A__ = attention_window
A__ = sep_token_id
A__ = bos_token_id
A__ = eos_token_id
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = initializer_range
A__ = layer_norm_eps
A__ = onnx_export
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : str , _snake_case : "PretrainedConfig" , _snake_case : str = "default" , _snake_case : "List[PatchingSpec]" = None ):
"""simple docstring"""
super().__init__(_snake_case , _snake_case , _snake_case )
A__ = True
@property
def _a ( self : str ):
"""simple docstring"""
if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
] )
@property
def _a ( self : int ):
"""simple docstring"""
        outputs = super().outputs
        if self.task == "default":
            outputs['pooler_output'] = {0: 'batch'}
return outputs
@property
def _a ( self : str ):
"""simple docstring"""
return 1E-4
@property
def _a ( self : str ):
"""simple docstring"""
return max(super().default_onnx_opset , 14 )
def _a ( self : Tuple , _snake_case : "PreTrainedTokenizerBase" , _snake_case : int = -1 , _snake_case : int = -1 , _snake_case : bool = False , _snake_case : Optional[TensorType] = None , ):
"""simple docstring"""
        inputs = super().generate_dummy_inputs(
preprocessor=_snake_case , batch_size=_snake_case , seq_length=_snake_case , is_pair=_snake_case , framework=_snake_case )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
        inputs['global_attention_mask'] = torch.zeros_like(inputs['input_ids'] )
        # make every second token global
        inputs['global_attention_mask'][:, ::2] = 1
return inputs
| 52
|
import pytest
import datasets
# Import fixture modules as plugins
SCREAMING_SNAKE_CASE__ = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def A ( config , items ) -> Optional[int]:
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ['integration', 'unit'] ):
continue
item.add_marker(pytest.mark.unit )
def A ( __UpperCamelCase ) -> str:
config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' )
@pytest.fixture(autouse=__UpperCamelCase )
def A ( tmp_path_factory , monkeypatch ) -> Union[str, Any]:
    # test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / 'cache'
    test_hf_datasets_cache = test_hf_cache_home / 'datasets'
    test_hf_metrics_cache = test_hf_cache_home / 'metrics'
    test_hf_modules_cache = test_hf_cache_home / 'modules'
    monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(test_hf_datasets_cache ) )
    monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(test_hf_metrics_cache ) )
    monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(test_hf_modules_cache ) )
    test_downloaded_datasets_path = test_hf_datasets_cache / 'downloads'
    monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(test_downloaded_datasets_path ) )
    test_extracted_datasets_path = test_hf_datasets_cache / 'downloads' / 'extracted'
    monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(test_extracted_datasets_path ) )
@pytest.fixture(autouse=__UpperCamelCase , scope='session' )
def A ( ) -> Union[str, Any]:
datasets.disable_progress_bar()
@pytest.fixture(autouse=__UpperCamelCase )
def A ( __UpperCamelCase ) -> int:
# don't take tests into account when counting downloads
monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , __UpperCamelCase )
@pytest.fixture
def A ( __UpperCamelCase ) -> Any:
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
# To be removed once SQLAlchemy 2.0 supported
monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , __UpperCamelCase )
| 52
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ ):
"""simple docstring"""
A__ : str = "bit"
A__ : Any = ["preactivation", "bottleneck"]
A__ : Optional[int] = ["SAME", "VALID"]
def __init__( self : Dict , _snake_case : List[str]=3 , _snake_case : Optional[Any]=64 , _snake_case : Dict=[2_56, 5_12, 10_24, 20_48] , _snake_case : Any=[3, 4, 6, 3] , _snake_case : Union[str, Any]="preactivation" , _snake_case : Optional[int]="relu" , _snake_case : Optional[Any]=None , _snake_case : str=32 , _snake_case : Dict=0.0 , _snake_case : Any=False , _snake_case : List[str]=32 , _snake_case : Optional[int]=1 , _snake_case : Optional[Any]=None , _snake_case : Optional[Any]=None , **_snake_case : int , ):
"""simple docstring"""
super().__init__(**_snake_case )
if layer_type not in self.layer_types:
raise ValueError(F'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
A__ = global_padding.upper()
else:
raise ValueError(F'''Padding strategy {global_padding} not supported''' )
A__ = num_channels
A__ = embedding_size
A__ = hidden_sizes
A__ = depths
A__ = layer_type
A__ = hidden_act
A__ = global_padding
A__ = num_groups
A__ = drop_path_rate
A__ = embedding_dynamic_padding
A__ = output_stride
A__ = width_factor
A__ = ['stem'] + [F'''stage{idx}''' for idx in range(1 , len(_snake_case ) + 1 )]
A__ , A__ = get_aligned_output_features_output_indices(
out_features=_snake_case , out_indices=_snake_case , stage_names=self.stage_names )
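# Illustrative note (not part of the original config): with the default depths
# [3, 4, 6, 3] above, stage_names resolves to
# ['stem', 'stage1', 'stage2', 'stage3', 'stage4'] - one entry per stage plus the stem.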
| 52
|
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def A ( __UpperCamelCase , __UpperCamelCase ) -> Tuple:
A__ = args.log_outputs
A__ = '_'.join(args.dataset.split('/' ) + [args.config, args.split] )
# load metric
A__ = load_metric('wer' )
A__ = load_metric('cer' )
# compute metrics
A__ = wer.compute(references=result['target'] , predictions=result['prediction'] )
A__ = cer.compute(references=result['target'] , predictions=result['prediction'] )
# print & log results
A__ = f'''WER: {wer_result}\nCER: {cer_result}'''
print(__UpperCamelCase )
with open(f'''{dataset_id}_eval_results.txt''' , 'w' ) as f:
f.write(__UpperCamelCase )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
A__ = f'''log_{dataset_id}_predictions.txt'''
A__ = f'''log_{dataset_id}_targets.txt'''
with open(__UpperCamelCase , 'w' ) as p, open(__UpperCamelCase , 'w' ) as t:
# mapping function to write output
def write_to_file(__UpperCamelCase , __UpperCamelCase ):
p.write(f'''{i}''' + '\n' )
p.write(batch['prediction'] + '\n' )
t.write(f'''{i}''' + '\n' )
t.write(batch['target'] + '\n' )
result.map(__UpperCamelCase , with_indices=__UpperCamelCase )
def A ( __UpperCamelCase ) -> str:
A__ = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
A__ = re.sub(__UpperCamelCase , '' , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
    token_sequences_to_ignore = ['\n\n', '\n', '   ', '  ']
for t in token_sequences_to_ignore:
A__ = ' '.join(text.split(__UpperCamelCase ) )
return text
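# Illustrative run of normalize_text (hypothetical sentence, not from the script):
#   normalize_text('Hello, WORLD!\nsecond  line')
# lowercases, strips the ignored punctuation, and collapses newlines and repeated
# spaces, yielding 'hello world second line'.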
def A ( __UpperCamelCase ) -> Union[str, Any]:
# load dataset
A__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=__UpperCamelCase )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
A__ = AutoFeatureExtractor.from_pretrained(args.model_id )
A__ = feature_extractor.sampling_rate
# resample audio
A__ = dataset.cast_column('audio' , Audio(sampling_rate=__UpperCamelCase ) )
# load eval pipeline
if args.device is None:
A__ = 0 if torch.cuda.is_available() else -1
A__ = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(__UpperCamelCase ):
A__ = asr(
batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
A__ = prediction['text']
A__ = normalize_text(batch['sentence'] )
return batch
# run inference on all examples
A__ = dataset.map(__UpperCamelCase , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(__UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
)
parser.add_argument(
'''--dataset''',
type=str,
required=True,
help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
)
parser.add_argument(
'''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
)
parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
parser.add_argument(
'''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
)
parser.add_argument(
'''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
)
parser.add_argument(
'''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
)
parser.add_argument(
'''--device''',
type=int,
default=None,
help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
main(args)
| 52
| 1
|
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node :
    """simple docstring"""
    data : int
    left : Node | None = None
    right : Node | None = None
def make_tree( ) -> Node | None:
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree
def preorder( root ) -> list[int]:
    return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def postorder( root ) -> list[int]:
    return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def inorder( root ) -> list[int]:
    return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def height( root ) -> int:
    return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def level_order( root ) -> Sequence[Node | None]:
    output = []
if root is None:
return output
    process_queue = deque([root] )
while process_queue:
        node = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def get_nodes_from_left_to_right( root , level ) -> Sequence[Node | None]:
    output = []
    def populate_output(root , level ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
    populate_output(root , level )
return output
def get_nodes_from_right_to_left( root , level ) -> Sequence[Node | None]:
    output = []
    def populate_output(root , level ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
    populate_output(root , level )
return output
def zigzag( root ) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []
    output = []
    flag = 0
    height_tree = height(root )
for h in range(1 , height_tree + 1 ):
if not flag:
            output.append(get_nodes_from_left_to_right(root , h ) )
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root , h ) )
            flag = 0
return output
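# Illustrative expected results for the 5-node tree built by make_tree()
# (worked out by hand from the definitions above, not part of the original file):
#   preorder  -> [1, 2, 4, 5, 3]
#   inorder   -> [4, 2, 5, 1, 3]
#   postorder -> [4, 5, 2, 3, 1]
#   zigzag    -> [[1], [3, 2], [4, 5]]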
def main( ) -> None:  # Main function for testing.
    root = make_tree()
    print(f'''In-order Traversal: {inorder(root )}''' )
    print(f'''Pre-order Traversal: {preorder(root )}''' )
    print(f'''Post-order Traversal: {postorder(root )}''' , '\n' )
    print(f'''Height of Tree: {height(root )}''' , '\n' )
    print('Complete Level Order Traversal: ' )
    print(level_order(root ) , '\n' )
    print('Level-wise order Traversal: ' )
    for level in range(1 , height(root ) + 1 ):
        print(f'''Level {level}:''' , get_nodes_from_left_to_right(root , level=level ) )
    print('\nZigZag order Traversal: ' )
    print(zigzag(root ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 52
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def get_yolos_config( yolos_name ) -> YolosConfig:
A__ = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
A__ = 192
A__ = 768
A__ = 12
A__ = 3
A__ = [800, 1_333]
A__ = False
elif yolos_name == "yolos_s_dWr":
A__ = 330
A__ = 14
A__ = 6
A__ = 1_320
elif "yolos_s" in yolos_name:
A__ = 384
A__ = 1_536
A__ = 12
A__ = 6
elif "yolos_b" in yolos_name:
A__ = [800, 1_344]
A__ = 91
A__ = 'huggingface/label-files'
A__ = 'coco-detection-id2label.json'
A__ = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='dataset' ) , 'r' ) )
A__ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
return config
def read_in_q_k_v( state_dict , config , base_model = False ) -> str:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
A__ = in_proj_weight[: config.hidden_size, :]
A__ = in_proj_bias[: config.hidden_size]
A__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ = in_proj_weight[-config.hidden_size :, :]
A__ = in_proj_bias[-config.hidden_size :]
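# Toy check of the fused qkv split above (illustrative sizes, not a real
# checkpoint; the helper name is hypothetical). The three hidden_size-row slices
# taken in order must reassemble into the original fused projection matrix.
def _check_qkv_split() -> None:
    hidden_size = 4
    qkv = torch.arange(3 * hidden_size * hidden_size , dtype=torch.float32 ).reshape(3 * hidden_size , hidden_size )
    query = qkv[: hidden_size, :]
    key = qkv[hidden_size : hidden_size * 2, :]
    value = qkv[-hidden_size :, :]
    assert torch.equal(torch.cat([query, key, value] , dim=0 ) , qkv )
_check_qkv_split()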
def rename_key( name ) -> str:
if "backbone" in name:
A__ = name.replace('backbone' , 'vit' )
if "cls_token" in name:
A__ = name.replace('cls_token' , 'embeddings.cls_token' )
if "det_token" in name:
A__ = name.replace('det_token' , 'embeddings.detection_tokens' )
if "mid_pos_embed" in name:
A__ = name.replace('mid_pos_embed' , 'encoder.mid_position_embeddings' )
if "pos_embed" in name:
A__ = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
A__ = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "blocks" in name:
A__ = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
A__ = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
A__ = name.replace('attn' , 'attention.self' )
if "norm1" in name:
A__ = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
A__ = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
A__ = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
A__ = name.replace('mlp.fc2' , 'output.dense' )
if "class_embed" in name:
A__ = name.replace('class_embed' , 'class_labels_classifier' )
if "bbox_embed" in name:
A__ = name.replace('bbox_embed' , 'bbox_predictor' )
if "vit.norm" in name:
A__ = name.replace('vit.norm' , 'vit.layernorm' )
return name
def convert_state_dict( orig_state_dict , model ) -> dict:
for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
if "qkv" in key:
            key_split = key.split('.' )
            layer_num = int(key_split[2] )
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
A__ = val[:dim, :]
A__ = val[
dim : dim * 2, :
]
A__ = val[-dim:, :]
else:
A__ = val[:dim]
A__ = val[dim : dim * 2]
A__ = val[-dim:]
else:
A__ = val
return orig_state_dict
def prepare_img( ) -> torch.Tensor:
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
return im
@torch.no_grad()
def convert_yolos_checkpoint( yolos_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub = False ):
    config = get_yolos_config(yolos_name )
    # load original state_dict
    state_dict = torch.load(checkpoint_path , map_location='cpu' )['model']
    # load 🤗 model
    model = YolosForObjectDetection(config )
    model.eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != 'yolos_ti' else 512
    image_processor = YolosImageProcessor(format='coco_detection' , size=size )
    encoding = image_processor(images=prepare_img() , return_tensors='pt' )
    outputs = model(**encoding )
    logits , pred_boxes = outputs.logits, outputs.pred_boxes
    expected_slice_logits , expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
    else:
        raise ValueError(f'''Unknown yolos_name: {yolos_name}''' )
    assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1E-4 )
    assert torch.allclose(pred_boxes[0, :3, :3] , expected_slice_boxes , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model_mapping = {
            'yolos_ti': 'yolos-tiny',
            'yolos_s_200_pre': 'yolos-small',
            'yolos_s_300_pre': 'yolos-small-300',
            'yolos_s_dWr': 'yolos-small-dwr',
            'yolos_base': 'yolos-base',
        }
        print('Pushing to the hub...' )
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name , organization='hustvl' )
        model.push_to_hub(model_name , organization='hustvl' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
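# Example invocation of the conversion script (hypothetical local paths; the
# checkpoint must be an original YOLOS .pth file, and the script filename is
# assumed here):
#
#   python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
#       --checkpoint_path ./yolos_s_200_pre.pth \
#       --pytorch_dump_folder_path ./yolos-small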
def solution( n = 600_851_475_143 ) -> int:
    try:
        n = int(n )
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.' )
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.' )
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans )
if __name__ == "__main__":
print(f'{solution() = }')
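# Trial division only needs to run while i * i <= n: once every prime factor
# below sqrt(n) has been divided out, any remainder greater than 1 is itself
# prime. Hand-checked examples:
#
#   solution(13_195)  # -> 29, since 13195 = 5 * 7 * 13 * 29 (Project Euler #3 sample)
#   solution(17)      # -> 17, a prime is its own largest prime factor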
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
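# With the lazy structure above, importing the package stays cheap: entries in
# _import_structure are only materialized on first attribute access. A hedged
# usage sketch (module path assumed):
#
#   from transformers.onnx import OnnxConfig  # loads only the .config submodule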
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : int ):
"""simple docstring"""
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir , 'models/bert/' ) )
        check_copies.TRANSFORMER_PATH = self.transformer_dir
shutil.copy(
            os.path.join(git_repo_path , 'src/transformers/models/bert/modeling_bert.py' ) , os.path.join(self.transformer_dir , 'models/bert/modeling_bert.py' ) , )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
        check_copies.TRANSFORMER_PATH = 'src/transformers'
shutil.rmtree(self.transformer_dir )
def _a ( self : List[str] , _snake_case : Tuple , _snake_case : int , _snake_case : Tuple , _snake_case : int=None ):
"""simple docstring"""
A__ = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
A__ = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
        A__ = black.Mode(target_versions={black.TargetVersion.PY35} , line_length=119 )
A__ = black.format_str(_snake_case , mode=_snake_case )
A__ = os.path.join(self.transformer_dir , 'new_code.py' )
with open(_snake_case , 'w' , newline='\n' ) as f:
f.write(_snake_case )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_snake_case ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_snake_case )
with open(_snake_case , 'r' ) as f:
self.assertTrue(f.read() , _snake_case )
def _a ( self : str ):
"""simple docstring"""
A__ = check_copies.find_code_in_transformers('models.bert.modeling_bert.BertLMPredictionHead' )
self.assertEqual(_snake_case , _snake_case )
def _a ( self : List[Any] ):
"""simple docstring"""
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , REFERENCE_CODE + '\n' , )
# With no empty line at the end
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , _snake_case , )
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , re.sub('Bert' , 'TestModel' , _snake_case ) , )
# Copy consistency with a really long name
A__ = 'TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
self.check_copy_consistency(
F'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , F'''{long_class_name}LMPredictionHead''' , re.sub('Bert' , _snake_case , _snake_case ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , _snake_case , overwrite_result=re.sub('Bert' , 'TestModel' , _snake_case ) , )
def _a ( self : List[str] ):
"""simple docstring"""
A__ = check_copies.LOCALIZED_READMES['README_zh-hans.md']
A__ = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
A__ = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
A__ = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
A__ , A__ = check_copies.convert_to_localized_md(
_snake_case , _snake_case , localized_readme['format_model_list'] )
self.assertFalse(_snake_case )
self.assertEqual(_snake_case , _snake_case )
A__ , A__ = check_copies.convert_to_localized_md(
_snake_case , _snake_case , localized_readme['format_model_list'] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(_snake_case )
A__ = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
A__ = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
A__ = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
A__ , A__ = check_copies.convert_to_localized_md(
_snake_case , _snake_case , localized_readme['format_model_list'] )
# Check if the model link is synchronized.
self.assertEqual(_snake_case , _snake_case )
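# The `# Copied from ...` marker exercised above is a plain comment convention:
# the checker resolves the referenced object, applies any `Old->New` renames
# appended to the marker, and diffs the result against the annotated code,
# e.g. (illustrative):
#
#   # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel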
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
    },
    'tokenizer_file': {
        'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/rembert': 256,
}
SPIECE_UNDERLINE = '▁'
class __lowerCAmelCase ( PreTrainedTokenizerFast ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer
def __init__( self : Union[str, Any] , _snake_case : Any=None , _snake_case : Optional[Any]=None , _snake_case : Any=True , _snake_case : Optional[int]=True , _snake_case : Dict=False , _snake_case : Dict="[CLS]" , _snake_case : List[Any]="[SEP]" , _snake_case : Union[str, Any]="<unk>" , _snake_case : List[str]="[SEP]" , _snake_case : List[str]="<pad>" , _snake_case : str="[CLS]" , _snake_case : Any="[MASK]" , **_snake_case : Any , ):
"""simple docstring"""
A__ = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else mask_token
super().__init__(
_snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , remove_space=_snake_case , keep_accents=_snake_case , bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , **_snake_case , )
A__ = do_lower_case
A__ = remove_space
A__ = keep_accents
A__ = vocab_file
A__ = False if not self.vocab_file else True
    def _a ( self : Any , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def _a ( self : Tuple , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
        """simple docstring"""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]
    def _a ( self : Dict , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def _a ( self : Any , save_directory : str , filename_prefix : Optional[str] = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory ) )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
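# A hedged usage sketch for the fast tokenizer above (assumes the public
# google/rembert checkpoint and an installed sentencepiece backend):
#
#   tok = __lowerCAmelCase.from_pretrained('google/rembert')
#   tok.build_inputs_with_special_tokens([5, 6])  # -> [cls_id, 5, 6, sep_id]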
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results( result , args ):
    log_outputs = args.log_outputs
    dataset_id = '_'.join(args.dataset.split('/' ) + [args.config, args.split] )
    # load metric
    wer = load_metric('wer' )
    cer = load_metric('cer' )
    # compute metrics
    wer_result = wer.compute(references=result['target'] , predictions=result['prediction'] )
    cer_result = cer.compute(references=result['target'] , predictions=result['prediction'] )
    # print & log results
    result_str = f'''WER: {wer_result}\nCER: {cer_result}'''
    print(result_str )
    with open(f'''{dataset_id}_eval_results.txt''' , 'w' ) as f:
        f.write(result_str )
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f'''log_{dataset_id}_predictions.txt'''
        target_file = f'''log_{dataset_id}_targets.txt'''
        with open(pred_file , 'w' ) as p, open(target_file , 'w' ) as t:
            # mapping function to write output
            def write_to_file(batch , i ):
                p.write(f'''{i}''' + '\n' )
                p.write(batch['prediction'] + '\n' )
                t.write(f'''{i}''' + '\n' )
                t.write(batch['target'] + '\n' )
            result.map(write_to_file , with_indices=True )
def normalize_text( text ) -> str:
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex , '' , text.lower() )
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['\n\n', '\n', '   ', '  ']
    for t in token_sequences_to_ignore:
        text = ' '.join(text.split(t ) )
    return text
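# Illustrative effect of the normalization above (hypothetical input):
#
#   normalize_text('Hello, World!\n\nGood day.')  # -> 'hello world good day'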
def main( args ):
    # load dataset
    dataset = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=True )
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id )
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column('audio' , Audio(sampling_rate=sampling_rate ) )
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device )
    # map function to decode audio
    def map_to_pred(batch ):
        prediction = asr(
            batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
        batch['prediction'] = prediction['text']
        batch['target'] = normalize_text(batch['sentence'] )
        return batch
    # run inference on all examples
    result = dataset.map(map_to_pred , remove_columns=dataset.column_names )
    # compute and log_results
    # do not change function below
    log_results(result , args )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
)
parser.add_argument(
'''--dataset''',
type=str,
required=True,
help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
)
parser.add_argument(
'''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
)
parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
parser.add_argument(
'''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
)
parser.add_argument(
'''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
)
parser.add_argument(
'''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
)
parser.add_argument(
'''--device''',
type=int,
default=None,
help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
)
    args = parser.parse_args()
main(args)
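# Example invocation (hypothetical model and dataset identifiers):
#
#   python eval.py --model_id some-org/wav2vec2-large-xlsr-53-english \
#       --dataset mozilla-foundation/common_voice_8_0 --config en --split test --log_outputs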
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = 'sshleifer/bart-tiny-random'
TINY_T5 = 'patrickvonplaten/t5-tiny-random'
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : Optional[int] ):
"""simple docstring"""
        return AutoConfig.from_pretrained(TINY_BART )
def _a ( self : Optional[Any] ):
"""simple docstring"""
        student , *_ = create_student_by_copying_alternating_layers(TINY_T5 , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def _a ( self : Optional[int] ):
"""simple docstring"""
        student , *_ = create_student_by_copying_alternating_layers(TINY_T5 , tempfile.mkdtemp() , e=1 , d=None )
def _a ( self : int ):
"""simple docstring"""
        student , *_ = create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=1 , d=None )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def _a ( self : str ):
"""simple docstring"""
        student , *_ = create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def _a ( self : str ):
"""simple docstring"""
        with self.assertRaises(AssertionError ):
            create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=None , d=None )
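# The helper under test builds a student by copying alternating teacher layers:
# e=1 keeps a single encoder layer, d=None lets the decoder depth default to
# the teacher's full depth (hence the decoder_layers == teacher encoder_layers
# assertion above), and e=None together with d=None is expected to be rejected.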
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
A__ : Optional[int] = ViTImageProcessor if is_vision_available() else None
@property
def _a ( self : List[Any] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self : Any ):
"""simple docstring"""
        A__ = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()
# fmt: off
A__ = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
A__ = dict(zip(_snake_case , range(len(_snake_case ) ) ) )
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_snake_case ) + '\n' )
A__ = {
'do_normalize': False,
'do_resize': True,
'image_processor_type': 'ViTImageProcessor',
'resample': 3,
            'size': {'height': 32, 'width': 128},
}
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(_snake_case , _snake_case )
def _a ( self : str , **_snake_case : Tuple ):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_snake_case )
def _a ( self : Union[str, Any] , **_snake_case : Optional[Any] ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_snake_case )
def _a ( self : str ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _a ( self : Optional[int] ):
"""simple docstring"""
        A__ = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )
A__ = Image.fromarray(np.moveaxis(_snake_case , 0 , -1 ) )
return image_input
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = self.get_image_processor()
A__ = MgpstrProcessor(tokenizer=_snake_case , image_processor=_snake_case )
processor.save_pretrained(self.tmpdirname )
A__ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_snake_case )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _snake_case )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _snake_case )
def _a ( self : int ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = self.get_image_processor()
A__ = MgpstrProcessor(tokenizer=_snake_case , image_processor=_snake_case )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
A__ = self.get_image_processor(do_normalize=_snake_case , padding_value=1.0 )
A__ = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_snake_case , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _snake_case )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _snake_case )
def _a ( self : str ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=_snake_case , image_processor=_snake_case )
A__ = self.prepare_image_inputs()
A__ = image_processor(_snake_case , return_tensors='np' )
A__ = processor(images=_snake_case , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self : Dict ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=_snake_case , image_processor=_snake_case )
A__ = 'test'
A__ = processor(text=_snake_case )
A__ = tokenizer(_snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self : Tuple ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=_snake_case , image_processor=_snake_case )
A__ = 'test'
A__ = self.prepare_image_inputs()
A__ = processor(text=_snake_case , images=_snake_case )
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'labels'] )
# test if it raises when no input is passed
with pytest.raises(_snake_case ):
processor()
def _a ( self : str ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=_snake_case , image_processor=_snake_case )
A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
A__ = processor.char_decode(_snake_case )
A__ = tokenizer.batch_decode(_snake_case )
A__ = [seq.replace(' ' , '' ) for seq in decoded_tok]
self.assertListEqual(_snake_case , _snake_case )
def _a ( self : List[str] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=_snake_case , image_processor=_snake_case )
A__ = None
A__ = self.prepare_image_inputs()
A__ = processor(text=_snake_case , images=_snake_case )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def _a ( self : str ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=_snake_case , image_processor=_snake_case )
A__ = torch.randn(1 , 27 , 38 )
        A__ = torch.randn(1 , 27 , 50257 )
        A__ = torch.randn(1 , 27 , 30522 )
A__ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds'] )
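# The three decode heads differ only in vocabulary width, which is why the
# dummy logits above are 38 (character set), 50257 (GPT-2 BPE vocab) and
# 30522 (BERT wordpiece vocab) classes wide.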
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( ProcessorMixin ):
"""simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self : List[Any] , _snake_case : Optional[Any] , _snake_case : Optional[int] ):
"""simple docstring"""
super().__init__(_snake_case , _snake_case )
def __call__( self : List[Any] , _snake_case : int , _snake_case : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _snake_case : bool = True , _snake_case : Union[bool, str, PaddingStrategy] = False , _snake_case : Union[bool, str, TruncationStrategy] = None , _snake_case : Optional[int] = None , _snake_case : int = 0 , _snake_case : Optional[int] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[bool] = None , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = True , _snake_case : Optional[Union[str, TensorType]] = None , **_snake_case : Optional[int] , ):
"""simple docstring"""
A__ = self.tokenizer(
text=_snake_case , add_special_tokens=_snake_case , padding=_snake_case , truncation=_snake_case , max_length=_snake_case , stride=_snake_case , pad_to_multiple_of=_snake_case , return_token_type_ids=_snake_case , return_attention_mask=_snake_case , return_overflowing_tokens=_snake_case , return_special_tokens_mask=_snake_case , return_offsets_mapping=_snake_case , return_length=_snake_case , verbose=_snake_case , return_tensors=_snake_case , **_snake_case , )
# add pixel_values + pixel_mask
A__ = self.image_processor(
_snake_case , return_tensors=_snake_case , do_normalize=_snake_case , do_center_crop=_snake_case , **_snake_case )
encoding.update(_snake_case )
return encoding
def _a ( self : Any , *_snake_case : Tuple , **_snake_case : List[Any] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*_snake_case , **_snake_case )
def _a ( self : Dict , *_snake_case : Dict , **_snake_case : List[str] ):
"""simple docstring"""
return self.tokenizer.decode(*_snake_case , **_snake_case )
@property
def _a ( self : Tuple ):
"""simple docstring"""
A__ = self.tokenizer.model_input_names
A__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
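# A hedged usage sketch for the processor above (checkpoint name assumed):
#
#   processor = __lowerCAmelCase.from_pretrained('BridgeTower/bridgetower-base')
#   batch = processor(image, text='a photo of a cat', return_tensors='pt')
#   # batch holds input_ids/attention_mask plus pixel_values and pixel_mask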
from __future__ import annotations
def two_pointer( nums , target ) -> list[int]:
    i = 0
    j = len(nums ) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(f'{two_pointer([2, 7, 11, 15], 9) = }')
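# Note: the two-pointer scan relies on `nums` being sorted in non-decreasing
# order; for unsorted input a hash-map lookup (value -> index) would be the
# usual O(n) alternative.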
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlm_roberta'] = ['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlm_roberta_fast'] = ['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlm_roberta'] = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlm_roberta'] = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_xlm_roberta'] = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def _a ( self : Optional[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
        A__ = UNet2DModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def _a ( self : List[str] ):
"""simple docstring"""
A__ = self.dummy_uncond_unet
A__ = ScoreSdeVeScheduler()
A__ = ScoreSdeVePipeline(unet=_snake_case , scheduler=_snake_case )
sde_ve.to(_snake_case )
sde_ve.set_progress_bar_config(disable=_snake_case )
A__ = torch.manual_seed(0 )
A__ = sde_ve(num_inference_steps=2 , output_type='numpy' , generator=_snake_case ).images
A__ = torch.manual_seed(0 )
A__ = sde_ve(num_inference_steps=2 , output_type='numpy' , generator=_snake_case , return_dict=_snake_case )[
0
]
A__ = image[0, -3:, -3:, -1]
A__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A__ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = 'google/ncsnpp-church-256'
        A__ = UNet2DModel.from_pretrained(_snake_case )
A__ = ScoreSdeVeScheduler.from_pretrained(_snake_case )
A__ = ScoreSdeVePipeline(unet=_snake_case , scheduler=_snake_case )
sde_ve.to(_snake_case )
sde_ve.set_progress_bar_config(disable=_snake_case )
A__ = torch.manual_seed(0 )
A__ = sde_ve(num_inference_steps=10 , output_type='numpy' , generator=_snake_case ).images
A__ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
A__ = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type( model_name_or_path ):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def metric_max_over_ground_truths( metric_fn , prediction , ground_truths ):
    return max(metric_fn(prediction , gt ) for gt in ground_truths )
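# Illustrative: with exact_match_score as metric_fn, a prediction scores 1 as
# long as it matches ANY of the gold answers (assuming SQuAD-style answer
# normalization in utils_rag):
#
#   metric_max_over_ground_truths(exact_match_score, 'Paris', ['paris', 'Paris, France'])  # -> 1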
def get_scores( args , preds_path , gold_data_path ):
    hypos = [line.strip() for line in open(preds_path , 'r' ).readlines()]
    answers = []
    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path , sep='\t' , header=None )
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list )
            answers.append(ground_truths )
    else:
        references = [line.strip() for line in open(gold_data_path , 'r' ).readlines()]
        answers = [[reference] for reference in references]
    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos , answers ):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score , prediction , ground_truths )
        f1 += metric_max_over_ground_truths(f1_score , prediction , ground_truths )
    em = 100.0 * em / total
    f1 = 100.0 * f1 / total
    logger.info(f'''F1: {f1:.2f}''' )
    logger.info(f'''EM: {em:.2f}''' )
def get_precision_at_k( args , preds_path , gold_data_path ):
    k = args.k
    hypos = [line.strip() for line in open(preds_path , 'r' ).readlines()]
    references = [line.strip() for line in open(gold_data_path , 'r' ).readlines()]
    em = total = 0
    for hypo, reference in zip(hypos , references ):
        hypo_provenance = set(hypo.split('\t' )[:k] )
        ref_provenance = set(reference.split('\t' ) )
        total += 1
        em += len(hypo_provenance & ref_provenance ) / k
    em = 100.0 * em / total
    logger.info(f'''Precision@{k}: {em: .2f}''' )
def evaluate_batch_retrieval( args , rag_model , questions ):
    def strip_title(title ):
        if title.startswith('"' ):
            title = title[1:]
        if title.endswith('"' ):
            title = title[:-1]
        return title
    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions , return_tensors='pt' , padding=True , truncation=True , )['input_ids'].to(args.device )
    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids )
    question_enc_pool_output = question_enc_outputs[0]
    result = rag_model.retriever(
        retriever_input_ids , question_enc_pool_output.cpu().detach().to(torch.float32 ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='pt' , )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
    provenance_strings = []
    for docs in all_docs:
        provenance_titles = [strip_title(title ) for title in docs['title']]
        provenance_strings.append('\t'.join(provenance_titles ) )
    return provenance_strings
def evaluate_batch_e2e( args , rag_model , questions ):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions , return_tensors='pt' , padding=True , truncation=True )
        input_ids = inputs_dict.input_ids.to(args.device )
        attention_mask = inputs_dict.attention_mask.to(args.device )
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids , attention_mask=attention_mask , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=False , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs , skip_special_tokens=True )
        if args.print_predictions:
            for q, a in zip(questions , answers ):
                logger.info('Q: {} - A: {}'.format(q , a ) )
        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_type' , choices=['rag_sequence', 'rag_token', 'bart'] , type=str , help=(
            'RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'
            ' model_name_or_path'
        ) , )
    parser.add_argument(
        '--index_name' , default=None , choices=['exact', 'compressed', 'legacy'] , type=str , help='RAG model retriever type' , )
    parser.add_argument(
        '--index_path' , default=None , type=str , help='Path to the retrieval index' , )
    parser.add_argument('--n_docs' , default=5 , type=int , help='Number of retrieved docs' )
    parser.add_argument(
        '--model_name_or_path' , default=None , type=str , required=True , help='Path to pretrained checkpoints or model identifier from huggingface.co/models' , )
    parser.add_argument(
        '--eval_mode' , choices=['e2e', 'retrieval'] , default='e2e' , type=str , help=(
            'Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'
            ' precision@k.'
        ) , )
    parser.add_argument('--k' , default=1 , type=int , help='k for the precision@k calculation' )
    parser.add_argument(
        '--evaluation_set' , default=None , type=str , required=True , help='Path to a file containing evaluation samples' , )
    parser.add_argument(
        '--gold_data_path' , default=None , type=str , required=True , help='Path to a tab-separated file with gold samples' , )
    parser.add_argument(
        '--gold_data_mode' , default='qa' , type=str , choices=['qa', 'ans'] , help=(
            'Format of the gold data file'
            'qa - a single line in the following format: question [tab] answer_list'
            'ans - a single line of the gold file contains the expected answer string'
        ) , )
    parser.add_argument(
        '--predictions_path' , type=str , default='predictions.txt' , help='Name of the predictions file, to be stored in the checkpoints directory' , )
    parser.add_argument(
        '--eval_all_checkpoints' , action='store_true' , help='Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number' , )
    parser.add_argument(
        '--eval_batch_size' , default=8 , type=int , help='Batch size per GPU/CPU for evaluation.' , )
    parser.add_argument(
        '--recalculate' , help='Recalculate predictions even if the prediction file exists' , action='store_true' , )
    parser.add_argument(
        '--num_beams' , default=4 , type=int , help='Number of beams to be used when generating answers' , )
    parser.add_argument('--min_length' , default=1 , type=int , help='Min length of the generated answers' )
    parser.add_argument('--max_length' , default=50 , type=int , help='Max length of the generated answers' )
    parser.add_argument(
        '--print_predictions' , action='store_true' , help='If True, prints predictions while evaluating.' , )
    parser.add_argument(
        '--print_docs' , action='store_true' , help='If True, prints docs retrieved while generating.' , )
    args = parser.parse_args()
    args.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
    return args
def main( args ):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path )
        assert args.model_type is not None
    if args.model_type.startswith('rag' ):
        model_class = RagTokenForGeneration if args.model_type == 'rag_token' else RagSequenceForGeneration
        model_kwargs['n_docs'] = args.n_docs
        if args.index_name is not None:
            model_kwargs['index_name'] = args.index_name
        if args.index_path is not None:
            model_kwargs['index_path'] = args.index_path
    else:
        model_class = BartForConditionalGeneration
    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info('Evaluate the following checkpoints: %s' , checkpoints )
    score_fn = get_scores if args.eval_mode == 'e2e' else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == 'e2e' else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path ) and (not args.recalculate):
            logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path ) )
            score_fn(args , args.predictions_path , args.gold_data_path )
            continue
        logger.info('***** Running evaluation for {} *****'.format(checkpoint ) )
        logger.info('  Batch size = %d' , args.eval_batch_size )
        logger.info('  Predictions will be stored under {}'.format(args.predictions_path ) )
        if args.model_type.startswith('rag' ):
            retriever = RagRetriever.from_pretrained(checkpoint , **model_kwargs )
            model = model_class.from_pretrained(checkpoint , retriever=retriever , **model_kwargs )
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint , **model_kwargs )
        model.to(args.device )
        with open(args.evaluation_set , 'r' ) as eval_file, open(args.predictions_path , 'w' ) as preds_file:
            questions = []
            for line in tqdm(eval_file ):
                questions.append(line.strip() )
                if len(questions ) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args , model , questions )
                    preds_file.write('\n'.join(answers ) + '\n' )
                    preds_file.flush()
                    questions = []
            if len(questions ) > 0:
                answers = evaluate_batch_fn(args , model , questions )
                preds_file.write('\n'.join(answers ) )
                preds_file.flush()
            score_fn(args , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
    args = get_args()
main(args)
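# Example invocation (hypothetical file paths; the model identifier is a real
# public checkpoint):
#
#   python eval_rag.py --model_name_or_path facebook/rag-sequence-nq \
#       --evaluation_set path/to/questions.txt --gold_data_path path/to/gold.tsv \
#       --predictions_path preds.txt --eval_mode e2e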
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester :
"""simple docstring"""
def __init__( self : List[Any] , _snake_case : Any , _snake_case : Optional[int]=13 , _snake_case : Optional[Any]=64 , _snake_case : List[str]=2 , _snake_case : Any=3 , _snake_case : Union[str, Any]=True , _snake_case : Dict=True , _snake_case : int=32 , _snake_case : int=5 , _snake_case : Union[str, Any]=4 , _snake_case : int=37 , _snake_case : Tuple="gelu" , _snake_case : Optional[int]=0.1 , _snake_case : Dict=0.1 , _snake_case : List[str]=10 , _snake_case : Union[str, Any]=0.02 , _snake_case : Dict=[1, 16, 4, 4] , _snake_case : Dict=None , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = image_size
A__ = patch_size
A__ = num_channels
A__ = is_training
A__ = use_labels
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = type_sequence_label_size
A__ = initializer_range
A__ = scope
A__ = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
A__ = (self.image_size // 32) ** 2
A__ = num_patches + 1
def _a ( self : Any ):
"""simple docstring"""
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = self.get_config()
return config, pixel_values, labels
def _a ( self : Tuple ):
"""simple docstring"""
A__ = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_snake_case , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=_snake_case , )
def _a ( self : int , _snake_case : Optional[int] , _snake_case : Union[str, Any] , _snake_case : Optional[int] ):
"""simple docstring"""
A__ = ViTHybridModel(config=_snake_case )
model.to(_snake_case )
model.eval()
A__ = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : List[str] , _snake_case : str , _snake_case : Union[str, Any] , _snake_case : Any ):
"""simple docstring"""
A__ = self.type_sequence_label_size
A__ = ViTHybridForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
A__ = model(_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self : Dict ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Union[str, Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
A__ : str = (
{"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
A__ : Union[str, Any] = False
A__ : Any = False
A__ : Union[str, Any] = False
def _a ( self : Dict ):
"""simple docstring"""
A__ = ViTHybridModelTester(self )
A__ = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37 )
def _a ( self : int ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def _a ( self : int ):
"""simple docstring"""
pass
def _a ( self : int ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(_snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case , nn.Linear ) )
def _a ( self : List[str] ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(_snake_case )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _snake_case )
def _a ( self : Any ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def _a ( self : str ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
def _a ( self : Any ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = _config_zero_init(_snake_case )
for model_class in self.all_model_classes:
A__ = model_class(config=_snake_case )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
A__ = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def _a ( self : int ):
"""simple docstring"""
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = ViTHybridModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def A ( ) -> Union[str, Any]:
A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : Tuple ):
"""simple docstring"""
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_snake_case )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=_snake_case , return_tensors='pt' ).to(_snake_case )
# forward pass
with torch.no_grad():
A__ = model(**_snake_case )
# verify the logits
A__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _snake_case )
A__ = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1E-4 ) )
@slow
@require_accelerate
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
A__ = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
A__ = prepare_img()
A__ = image_processor(images=_snake_case , return_tensors='pt' )
A__ = model(**_snake_case )
A__ = outputs.logits
# model predicts one of the 1000 ImageNet classes
A__ = logits.argmax(-1 ).item()
        self.assertEqual(model.config.idalabel[predicted_class_idx] , 'tabby, tabby cat' )
| 52
| 1
|
from collections import defaultdict
def A ( __UpperCamelCase , __UpperCamelCase ) -> bool:
A__ = first_str.lower().strip()
A__ = second_str.lower().strip()
# Remove whitespace
A__ = first_str.replace(' ' , '' )
A__ = second_str.replace(' ' , '' )
# Strings of different lengths are not anagrams
if len(__UpperCamelCase ) != len(__UpperCamelCase ):
return False
# Default values for count should be 0
A__ = defaultdict(__UpperCamelCase )
    # For each character in the input strings, increment the count for the
    # first string and decrement it for the second; for anagrams they cancel out
for i in range(len(__UpperCamelCase ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
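# A minimal alternative sketch, not part of the original file: the same
# single-pass counting idea expressed with collections.Counter (two strings are
# anagrams exactly when their character multisets match), assuming the inputs
# are lower-cased and whitespace-stripped as above.
from collections import Counter


def check_anagrams_counter(first: str, second: str) -> bool:
    return Counter(first) == Counter(second)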
if __name__ == "__main__":
from doctest import testmod
testmod()
SCREAMING_SNAKE_CASE__ = input('''Enter the first string ''').strip()
SCREAMING_SNAKE_CASE__ = input('''Enter the second string ''').strip()
SCREAMING_SNAKE_CASE__ = check_anagrams(input_a, input_b)
print(f'{input_a} and {input_b} are {"" if status else "not "}anagrams.')
| 52
|
def A ( __UpperCamelCase ) -> bool:
return number & 1 == 0
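# The least significant bit of an integer is 0 exactly when the number is even,
# so masking with 1 tests parity: 10 (0b1010) & 1 == 0 -> even, while
# 7 (0b0111) & 1 == 1 -> odd.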
if __name__ == "__main__":
import doctest
doctest.testmod()
| 52
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : List[str] = "roberta"
def __init__( self : List[str] , _snake_case : Union[str, Any]=5_02_65 , _snake_case : List[Any]=7_68 , _snake_case : List[str]=12 , _snake_case : List[str]=12 , _snake_case : Any=30_72 , _snake_case : Union[str, Any]="gelu" , _snake_case : int=0.1 , _snake_case : Union[str, Any]=0.1 , _snake_case : Tuple=5_12 , _snake_case : Union[str, Any]=2 , _snake_case : Any=0.02 , _snake_case : Any=1E-12 , _snake_case : List[Any]=1 , _snake_case : int=0 , _snake_case : Any=2 , _snake_case : Optional[Any]="absolute" , _snake_case : int=True , _snake_case : Any=None , **_snake_case : Any , ):
"""simple docstring"""
super().__init__(pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case )
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = initializer_range
A__ = layer_norm_eps
A__ = position_embedding_type
A__ = use_cache
A__ = classifier_dropout
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
@property
def _a ( self : Dict ):
"""simple docstring"""
if self.task == "multiple-choice":
A__ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
A__ = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
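# Illustrative only: for any task other than "multiple-choice" the property
# above resolves to
#     OrderedDict([
#         ('input_ids', {0: 'batch', 1: 'sequence'}),
#         ('attention_mask', {0: 'batch', 1: 'sequence'}),
#     ])
# marking the batch and sequence dimensions as dynamic for ONNX export.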
| 52
|
from typing import Dict
from .base import GenericTensor, Pipeline
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def _a ( self : Any , _snake_case : str=None , _snake_case : Dict=None , _snake_case : Any=None , **_snake_case : str ):
"""simple docstring"""
if tokenize_kwargs is None:
A__ = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' )
A__ = truncation
A__ = tokenize_kwargs
A__ = {}
if return_tensors is not None:
A__ = return_tensors
return preprocess_params, {}, postprocess_params
def _a ( self : Any , _snake_case : Dict , **_snake_case : Optional[Any] ):
"""simple docstring"""
A__ = self.framework
A__ = self.tokenizer(_snake_case , return_tensors=_snake_case , **_snake_case )
return model_inputs
def _a ( self : List[Any] , _snake_case : Dict ):
"""simple docstring"""
A__ = self.model(**_snake_case )
return model_outputs
def _a ( self : Optional[Any] , _snake_case : List[Any] , _snake_case : str=False ):
"""simple docstring"""
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self : Dict , *_snake_case : int , **_snake_case : List[str] ):
"""simple docstring"""
return super().__call__(*_snake_case , **_snake_case )
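# A minimal usage sketch, not part of the original file: the generic `pipeline`
# factory dispatches the "feature-extraction" task to the class above. The
# checkpoint name is illustrative, not prescribed by this file.
if __name__ == "__main__":
    from transformers import pipeline

    extractor = pipeline('feature-extraction' , model='distilbert-base-uncased' )
    features = extractor('This is a test' )  # nested list: [batch, tokens, hidden_size]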
| 52
| 1
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Any ):
"""simple docstring"""
A__ = tempfile.mkdtemp()
# fmt: off
A__ = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
A__ = dict(zip(_snake_case , range(len(_snake_case ) ) ) )
A__ = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
A__ = {'unk_token': '<unk>'}
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_snake_case ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_snake_case ) )
A__ = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073],
'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
A__ = os.path.join(self.tmpdirname , _snake_case )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(_snake_case , _snake_case )
def _a ( self : Union[str, Any] , **_snake_case : Tuple ):
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_snake_case )
def _a ( self : Dict , **_snake_case : Union[str, Any] ):
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_snake_case )
def _a ( self : Optional[int] , **_snake_case : Union[str, Any] ):
"""simple docstring"""
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **_snake_case )
def _a ( self : Dict ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
A__ = [Image.fromarray(np.moveaxis(_snake_case , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = self.get_rust_tokenizer()
A__ = self.get_image_processor()
A__ = CLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case )
processor_slow.save_pretrained(self.tmpdirname )
A__ = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_snake_case )
A__ = CLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case )
processor_fast.save_pretrained(self.tmpdirname )
A__ = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _snake_case )
self.assertIsInstance(processor_fast.tokenizer , _snake_case )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _snake_case )
self.assertIsInstance(processor_fast.image_processor , _snake_case )
def _a ( self : List[str] ):
"""simple docstring"""
A__ = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
A__ = self.get_image_processor(do_normalize=_snake_case , padding_value=1.0 )
A__ = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_snake_case , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _snake_case )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _snake_case )
def _a ( self : int ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = CLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case )
A__ = self.prepare_image_inputs()
A__ = image_processor(_snake_case , return_tensors='np' )
A__ = processor(images=_snake_case , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self : Any ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = CLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case )
A__ = 'lower newer'
A__ = processor(text=_snake_case )
A__ = tokenizer(_snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = CLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case )
A__ = 'lower newer'
A__ = self.prepare_image_inputs()
A__ = processor(text=_snake_case , images=_snake_case )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_snake_case ):
processor()
def _a ( self : str ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = CLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case )
A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A__ = processor.batch_decode(_snake_case )
A__ = tokenizer.batch_decode(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
def _a ( self : Tuple ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = CLIPProcessor(tokenizer=_snake_case , image_processor=_snake_case )
A__ = 'lower newer'
A__ = self.prepare_image_inputs()
A__ = processor(text=_snake_case , images=_snake_case )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 52
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
def A ( __UpperCamelCase , __UpperCamelCase ) -> List[Any]:
return (preds == labels).mean()
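# e.g. with preds = np.array([1, 0, 1]) and labels = np.array([1, 1, 1]),
# (preds == labels).mean() evaluates to 2/3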
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
A__ : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
A__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
A__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
A__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
A__ : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
A__ : str = field(metadata={"help": "Should contain the data files for the task."} )
A__ : int = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
A__ : bool = field(
default=UpperCAmelCase_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def A ( ) -> Any:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
A__ , A__ , A__ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , __UpperCamelCase )
# Set seed
set_seed(training_args.seed )
try:
A__ = processors[data_args.task_name]()
A__ = processor.get_labels()
A__ = len(__UpperCamelCase )
except KeyError:
raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__UpperCamelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
A__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
A__ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__UpperCamelCase , cache_dir=model_args.cache_dir , )
# Get datasets
A__ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=__UpperCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
A__ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=__UpperCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(__UpperCamelCase ) -> Dict:
A__ = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(__UpperCamelCase , p.label_ids )}
# Data collator
A__ = DataCollatorWithPadding(__UpperCamelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
A__ = Trainer(
model=__UpperCamelCase , args=__UpperCamelCase , train_dataset=__UpperCamelCase , eval_dataset=__UpperCamelCase , compute_metrics=__UpperCamelCase , data_collator=__UpperCamelCase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
A__ = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
A__ = trainer.evaluate()
A__ = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_master():
with open(__UpperCamelCase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , __UpperCamelCase , __UpperCamelCase )
writer.write('%s = %s\n' % (key, value) )
results.update(__UpperCamelCase )
return results
def A ( __UpperCamelCase ) -> List[Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 52
| 1
|
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : int = (DDPMScheduler,)
def _a ( self : Optional[Any] , **_snake_case : Any ):
"""simple docstring"""
A__ = {
'num_train_timesteps': 10_00,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**_snake_case )
return config
def _a ( self : Dict ):
"""simple docstring"""
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=_snake_case )
def _a ( self : List[Any] ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_snake_case , beta_end=_snake_case )
def _a ( self : List[str] ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_snake_case )
def _a ( self : Any ):
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_snake_case )
def _a ( self : Optional[Any] ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_snake_case )
def _a ( self : Optional[Any] ):
"""simple docstring"""
self.check_over_configs(thresholding=_snake_case )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_snake_case , prediction_type=_snake_case , sample_max_value=_snake_case , )
def _a ( self : Optional[Any] ):
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_snake_case )
def _a ( self : List[Any] ):
"""simple docstring"""
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=_snake_case )
def _a ( self : str ):
"""simple docstring"""
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**_snake_case )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_0979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.02 ) ) < 1E-5
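        # these reference values follow the DDPM posterior variance
        # beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) under the linear
        # schedule configured above (beta_start=0.0001, beta_end=0.02, 1000 steps)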
def _a ( self : str ):
"""simple docstring"""
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**_snake_case )
A__ = len(_snake_case )
A__ = self.dummy_model()
A__ = self.dummy_sample_deter
A__ = torch.manual_seed(0 )
for t in reversed(range(_snake_case ) ):
# 1. predict noise residual
A__ = model(_snake_case , _snake_case )
# 2. predict previous mean of sample x_t-1
A__ = scheduler.step(_snake_case , _snake_case , _snake_case , generator=_snake_case ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
A__ = pred_prev_sample
A__ = torch.sum(torch.abs(_snake_case ) )
A__ = torch.mean(torch.abs(_snake_case ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def _a ( self : Dict ):
"""simple docstring"""
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(prediction_type='v_prediction' )
A__ = scheduler_class(**_snake_case )
A__ = len(_snake_case )
A__ = self.dummy_model()
A__ = self.dummy_sample_deter
A__ = torch.manual_seed(0 )
for t in reversed(range(_snake_case ) ):
# 1. predict noise residual
A__ = model(_snake_case , _snake_case )
# 2. predict previous mean of sample x_t-1
A__ = scheduler.step(_snake_case , _snake_case , _snake_case , generator=_snake_case ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
A__ = pred_prev_sample
A__ = torch.sum(torch.abs(_snake_case ) )
A__ = torch.mean(torch.abs(_snake_case ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def _a ( self : Any ):
"""simple docstring"""
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**_snake_case )
A__ = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_snake_case )
A__ = scheduler.timesteps
for i, timestep in enumerate(_snake_case ):
if i == len(_snake_case ) - 1:
A__ = -1
else:
A__ = timesteps[i + 1]
A__ = scheduler.previous_timestep(_snake_case )
A__ = prev_t.item()
self.assertEqual(_snake_case , _snake_case )
def _a ( self : str ):
"""simple docstring"""
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**_snake_case )
A__ = [1_00, 87, 50, 51, 0]
with self.assertRaises(_snake_case , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=_snake_case )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**_snake_case )
A__ = [1_00, 87, 50, 1, 0]
A__ = len(_snake_case )
with self.assertRaises(_snake_case , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=_snake_case , timesteps=_snake_case )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**_snake_case )
A__ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            _snake_case , msg=f'`timesteps` must start before `self.config.num_train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=_snake_case )
| 52
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 52
| 1
|
def A ( __UpperCamelCase ) -> int:
A__ = [[0 for _ in range(__UpperCamelCase )] for _ in range(m + 1 )]
for i in range(m + 1 ):
A__ = 1
for n in range(m + 1 ):
for k in range(1 , __UpperCamelCase ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
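# Worked values for the bottom-up count above (the classic integer partition
# numbers): partition(3) == 3, partition(4) == 5, partition(5) == 7.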
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
SCREAMING_SNAKE_CASE__ = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
SCREAMING_SNAKE_CASE__ = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 52
|
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
SCREAMING_SNAKE_CASE__ = f'https://www.google.com/search?q={query}&num=100'
SCREAMING_SNAKE_CASE__ = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
SCREAMING_SNAKE_CASE__ = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
SCREAMING_SNAKE_CASE__ = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
| 52
| 1
|
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
SCREAMING_SNAKE_CASE__ = '''platform'''
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , ) -> Any:
if attention_mask is None:
A__ = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
A__ = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
A__ = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
A__ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
A__ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : Tuple , _snake_case : Any , _snake_case : Any=13 , _snake_case : int=7 , _snake_case : List[str]=True , _snake_case : Union[str, Any]=False , _snake_case : List[str]=99 , _snake_case : List[Any]=16 , _snake_case : Union[str, Any]=2 , _snake_case : List[Any]=4 , _snake_case : Tuple=4 , _snake_case : int="gelu" , _snake_case : Union[str, Any]=0.1 , _snake_case : str=0.1 , _snake_case : List[Any]=32 , _snake_case : str=2 , _snake_case : Dict=1 , _snake_case : int=0 , _snake_case : int=0.02 , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = eos_token_id
A__ = pad_token_id
A__ = bos_token_id
A__ = initializer_range
def _a ( self : str ):
"""simple docstring"""
A__ = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
A__ = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
A__ = shift_tokens_right(_snake_case , 1 , 2 )
A__ = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_snake_case , )
A__ = prepare_blenderbot_inputs_dict(_snake_case , _snake_case , _snake_case )
return config, inputs_dict
def _a ( self : Any ):
"""simple docstring"""
A__ , A__ = self.prepare_config_and_inputs()
return config, inputs_dict
def _a ( self : Any , _snake_case : Union[str, Any] , _snake_case : int , _snake_case : str ):
"""simple docstring"""
A__ = 20
A__ = model_class_name(_snake_case )
A__ = model.encode(inputs_dict['input_ids'] )
A__ , A__ = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
A__ = model.init_cache(decoder_input_ids.shape[0] , _snake_case , _snake_case )
A__ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
A__ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
A__ = model.decode(
decoder_input_ids[:, :-1] , _snake_case , decoder_attention_mask=_snake_case , past_key_values=_snake_case , decoder_position_ids=_snake_case , )
A__ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
A__ = model.decode(
decoder_input_ids[:, -1:] , _snake_case , decoder_attention_mask=_snake_case , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_snake_case , )
A__ = model.decode(_snake_case , _snake_case )
A__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def _a ( self : Any , _snake_case : Optional[int] , _snake_case : List[Any] , _snake_case : List[Any] ):
"""simple docstring"""
A__ = 20
A__ = model_class_name(_snake_case )
A__ = model.encode(inputs_dict['input_ids'] )
A__ , A__ = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
A__ = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
A__ = model.init_cache(decoder_input_ids.shape[0] , _snake_case , _snake_case )
A__ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
A__ = model.decode(
decoder_input_ids[:, :-1] , _snake_case , decoder_attention_mask=_snake_case , past_key_values=_snake_case , decoder_position_ids=_snake_case , )
A__ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
A__ = model.decode(
decoder_input_ids[:, -1:] , _snake_case , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_snake_case , decoder_position_ids=_snake_case , )
A__ = model.decode(_snake_case , _snake_case , decoder_attention_mask=_snake_case )
A__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
A__ : Any = 99
def _a ( self : int ):
"""simple docstring"""
A__ = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
A__ = input_ids.shape[0]
A__ = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def _a ( self : Tuple ):
"""simple docstring"""
A__ , A__ , A__ = self._get_config_and_data()
A__ = FlaxBlenderbotSmallForConditionalGeneration(_snake_case )
A__ = lm_model(input_ids=_snake_case )
A__ = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['logits'].shape , _snake_case )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
A__ = FlaxBlenderbotSmallForConditionalGeneration(_snake_case )
A__ = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
A__ = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
A__ = lm_model(input_ids=_snake_case , decoder_input_ids=_snake_case )
A__ = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['logits'].shape , _snake_case )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
A__ = shift_tokens_right(_snake_case , 1 , 2 )
A__ = np.equal(_snake_case , 1 ).astype(np.floataa ).sum()
A__ = np.equal(_snake_case , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_snake_case , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class __lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase , UpperCAmelCase_ ):
"""simple docstring"""
A__ : List[str] = True
A__ : Union[str, Any] = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
A__ : int = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def _a ( self : Any ):
"""simple docstring"""
A__ = FlaxBlenderbotSmallModelTester(self )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_snake_case , _snake_case , _snake_case )
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_snake_case , _snake_case , _snake_case )
def _a ( self : List[Any] ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
A__ = self._prepare_for_class(_snake_case , _snake_case )
A__ = model_class(_snake_case )
@jax.jit
def encode_jitted(_snake_case : str , _snake_case : int=None , **_snake_case : Union[str, Any] ):
return model.encode(input_ids=_snake_case , attention_mask=_snake_case )
with self.subTest('JIT Enabled' ):
A__ = encode_jitted(**_snake_case ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
A__ = encode_jitted(**_snake_case ).to_tuple()
self.assertEqual(len(_snake_case ) , len(_snake_case ) )
for jitted_output, output in zip(_snake_case , _snake_case ):
self.assertEqual(jitted_output.shape , output.shape )
def _a ( self : Tuple ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
A__ = model_class(_snake_case )
A__ = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
A__ = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(_snake_case : Optional[Any] , _snake_case : Tuple , _snake_case : Tuple ):
return model.decode(
decoder_input_ids=_snake_case , decoder_attention_mask=_snake_case , encoder_outputs=_snake_case , )
with self.subTest('JIT Enabled' ):
A__ = decode_jitted(**_snake_case ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
A__ = decode_jitted(**_snake_case ).to_tuple()
self.assertEqual(len(_snake_case ) , len(_snake_case ) )
for jitted_output, output in zip(_snake_case , _snake_case ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _a ( self : List[str] ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
A__ = model_class_name.from_pretrained('facebook/blenderbot_small-90M' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
A__ = np.ones((1, 1) ) * model.config.eos_token_id
A__ = model(_snake_case )
self.assertIsNotNone(_snake_case )
| 52
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Any = IFInpaintingPipeline
A__ : Dict = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
A__ : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A__ : Dict = PipelineTesterMixin.required_optional_params - {"latents"}
def _a ( self : Any ):
"""simple docstring"""
return self._get_dummy_components()
def _a ( self : Optional[int] , _snake_case : Any , _snake_case : str=0 ):
"""simple docstring"""
if str(_snake_case ).startswith('mps' ):
A__ = torch.manual_seed(_snake_case )
else:
A__ = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
A__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case )
A__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case )
A__ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _a ( self : Dict ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def _a ( self : int ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def _a ( self : Optional[int] ):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def _a ( self : List[str] ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def _a ( self : Dict ):
"""simple docstring"""
self._test_save_load_local()
def _a ( self : Optional[int] ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 52
| 1
|
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
SCREAMING_SNAKE_CASE__ = 1_6
SCREAMING_SNAKE_CASE__ = 3_2
def A ( __UpperCamelCase , __UpperCamelCase = 16 ) -> List[str]:
A__ = AutoTokenizer.from_pretrained('bert-base-cased' )
A__ = load_dataset('glue' , 'mrpc' )
def tokenize_function(__UpperCamelCase ):
# max_length=None => use the model max length (it's actually the default)
A__ = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=__UpperCamelCase , max_length=__UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A__ = datasets.map(
__UpperCamelCase , batched=__UpperCamelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A__ = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(__UpperCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
A__ = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
A__ = 16
elif accelerator.mixed_precision != "no":
A__ = 8
else:
A__ = None
return tokenizer.pad(
__UpperCamelCase , padding='longest' , max_length=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_tensors='pt' , )
# Instantiate dataloaders.
A__ = DataLoader(
tokenized_datasets['train'] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
A__ = DataLoader(
tokenized_datasets['validation'] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
SCREAMING_SNAKE_CASE__ = mocked_dataloaders # noqa: F811
def A ( __UpperCamelCase , __UpperCamelCase ) -> Tuple:
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS' , __UpperCamelCase ) == "1":
A__ = 2
# Initialize accelerator
A__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A__ = config['lr']
A__ = int(config['num_epochs'] )
A__ = int(config['seed'] )
A__ = int(config['batch_size'] )
A__ = evaluate.load('glue' , 'mrpc' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=__UpperCamelCase )
def inner_training_loop(__UpperCamelCase ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(__UpperCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A__ = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=__UpperCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A__ = model.to(accelerator.device )
# Instantiate optimizer
A__ = AdamW(params=model.parameters() , lr=__UpperCamelCase )
A__ , A__ = get_dataloaders(__UpperCamelCase , __UpperCamelCase )
# Instantiate scheduler
A__ = get_linear_schedule_with_warmup(
optimizer=__UpperCamelCase , num_warmup_steps=100 , num_training_steps=(len(__UpperCamelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A__ , A__ , A__ , A__ , A__ = accelerator.prepare(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Now we train the model
for epoch in range(__UpperCamelCase ):
model.train()
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
A__ = model(**__UpperCamelCase )
A__ = outputs.loss
accelerator.backward(__UpperCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A__ = model(**__UpperCamelCase )
A__ = outputs.logits.argmax(dim=-1 )
A__ , A__ = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=__UpperCamelCase , references=__UpperCamelCase , )
A__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , __UpperCamelCase )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def A ( ) -> Tuple:
A__ = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
        '--mixed_precision' , type=__UpperCamelCase , default=__UpperCamelCase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose '
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '
        'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
A__ = parser.parse_args()
A__ = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(__UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
main()
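# A minimal standalone sketch of the decorator used above. Assumption (matching
# its documented behaviour): `find_executable_batch_size` re-runs the wrapped
# function with a smaller batch size whenever an out-of-memory error is raised.
#
#     from accelerate.utils import find_executable_batch_size
#
#     @find_executable_batch_size(starting_batch_size=32)
#     def train(batch_size):
#         ...  # build dataloaders and the model for `batch_size`, then train
#
#     train()  # call with no arguments; `batch_size` is injected and shrunk on OOM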
| 52
|
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
SCREAMING_SNAKE_CASE__ = get_logger(__name__)
SCREAMING_SNAKE_CASE__ = r'''
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary token when not
            using beam search, or log softmax for each vocabulary token when using beam search.
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
'''
class __lowerCAmelCase :
"""simple docstring"""
@add_start_docstrings(_snake_case )
def __call__( self : Optional[int] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray ):
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class __lowerCAmelCase :
"""simple docstring"""
@add_start_docstrings(_snake_case )
def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray ):
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class FlaxLogitsProcessorList(list):
    """
    This class can be used to create a list of [`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] to subsequently
    process a `scores` input tensor. It inherits from list and adds a `__call__` method that applies each
    processor or warper in turn.
    """

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f'Make sure that all the required parameters: {list(function_args.keys())} for '
                        f'{processor.__class__} are passed to the logits processor.'
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
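# Illustrative usage of the list above (a minimal sketch, not part of the original module):
# chain a temperature warper and a top-k warper, defined further down in this file, and
# apply them to a dummy batch of logits.
#
#   processors = FlaxLogitsProcessorList(
#       [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=2)]
#   )
#   dummy_ids = jnp.zeros((1, 4), dtype=jnp.int32)
#   dummy_scores = jnp.array([[0.1, 0.2, 0.3, 0.4]])
#   warped = processors(dummy_ids, dummy_scores, cur_len=4)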
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    r"""
    [`FlaxLogitsWarper`] for temperature (exponential scaling of the output probability distribution).

    Args:
        temperature (`float`):
            The value used to module the logits distribution.
    """

    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f'`temperature` has to be a strictly positive float, but is {temperature}')

        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    """
    [`FlaxLogitsWarper`] that performs top-p filtering, i.e. restricting sampling to the smallest set of tokens
    whose cumulative probability reaches `top_p`.
    """

    def __init__(self, top_p: float, filter_value: float = -float('Inf'), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f'`top_p` has to be a float > 0 and < 1, but is {top_p}')
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f'`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}')

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    r"""
    [`FlaxLogitsWarper`] that performs top-k filtering, i.e. restricting sampling to the k highest probability
    tokens.
    """

    def __init__(self, top_k: int, filter_value: float = -float('Inf'), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f'`top_k` has to be a strictly positive integer, but is {top_k}')

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
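# Worked example (illustrative): for scores [[1.0, 3.0, 2.0, 0.0]] and top_k=2, the warper
# keeps the two largest logits and replaces the others with `filter_value`:
#
#   warper = FlaxTopKLogitsWarper(top_k=2)
#   warper(None, jnp.array([[1.0, 3.0, 2.0, 0.0]]), cur_len=1)
#   # -> [[-inf, 3.0, 2.0, -inf]]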
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    r"""
    [`FlaxLogitsProcessor`] that enforces the specified token as the first generated token.

    Args:
        bos_token_id (`int`):
            The id of the token to force as the first generated token.
    """

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float('inf'))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores
class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    r"""
    [`FlaxLogitsProcessor`] that enforces the specified token as the last generated token when `max_length` is
    reached.
    """

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float('inf'))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    r"""
    [`FlaxLogitsProcessor`] enforcing a min-length by setting the EOS probability to 0.
    """

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f'`min_length` has to be a positive integer, but is {min_length}')
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f'`eos_token_id` has to be a positive integer, but is {eos_token_id}')

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float('inf')), scores)
        return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    r"""
    [`FlaxLogitsProcessor`] suppressing a list of tokens as soon as the `generate` function starts generating,
    so that they are not sampled at the beginning of the generation.
    """

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len: int):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float('inf')), scores)
        return scores
class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    r"""
    [`FlaxLogitsProcessor`] suppressing a list of tokens at each decoding step by setting their log probs to
    `-inf`.
    """

    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float('inf'))
        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    r"""
    [`FlaxLogitsProcessor`] that takes a dictionary mapping generation indices to token indices that will be
    forced before sampling: the forced token gets log prob 0 and all other tokens get `-inf`.
    """

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float('inf')
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is longer than force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced.
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    r"""
    [`FlaxLogitsProcessor`] that modifies the logits for the generation of timestamps in Whisper transcription.
    """

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1

        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, 'max_initial_timestamp_index'):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|>, which is handled by `without_timestamps`
        scores = scores.at[:, self.no_timestamps_token_id].set(-float('inf'))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float('inf')),
                    scores_k.at[: self.eos_token_id].set(-float('inf')),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index

        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float('inf')),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float('inf')),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    """
    An implementation of the Monte Carlo method used to find pi: place random dots
    in a 2x2 square centred at (0, 0), count the share that falls inside the
    inscribed unit circle, and multiply that proportion by 4.
    """

    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f'The estimated value of pi is {pi_estimate}')
    print(f'The numpy value of pi is {pi}')
    print(f'The total error is {abs(pi - pi_estimate)}')


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """
    Monte Carlo estimate of the integral of `function_to_integrate` over
    [min_value, max_value]: the mean of the function at uniformly random points,
    scaled by the width of the interval.
    """
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    """
    Check the estimator on y = x, whose exact area is (max^2 - min^2) / 2.
    """

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print('******************')
    print(f'Estimating area under y=x where x varies from {min_value} to {max_value}')
    print(f'Estimated value is {estimated_value}')
    print(f'Expected value is {expected_value}')
    print(f'Total error is {abs(estimated_value - expected_value)}')
    print('******************')


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """
    Estimate pi as the area under the quarter circle y = sqrt(4 - x^2) on [0, 2].
    """

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print('******************')
    print('Estimating pi using area_under_curve_estimator')
    print(f'Estimated value is {estimated_value}')
    print(f'Expected value is {pi}')
    print(f'Total error is {abs(estimated_value - pi)}')
    print('******************')
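# Illustrative usage (stochastic, so exact output varies): with 10_000 samples the
# estimates are typically within a few hundredths of the true value.
#
#   pi_estimator(10_000)
#   area_under_line_estimator_check(10_000)
#   pi_estimator_using_area_under_curve(10_000)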
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import struct
import unittest
class SHA256:
    """
    Class to contain the entire pipeline for the SHA-256 hashing algorithm
    """

    def __init__(self, data: bytes) -> None:
        self.data = data

        # Initialize hash values
        self.hashes = [
0x6A09E667,
0xBB67AE85,
0x3C6EF372,
0xA54FF53A,
0x510E527F,
0x9B05688C,
0x1F83D9AB,
0x5BE0CD19,
]
# Initialize round constants
        self.round_constants = [
0x428A2F98,
0x71374491,
0xB5C0FBCF,
0xE9B5DBA5,
0x3956C25B,
0x59F111F1,
0x923F82A4,
0xAB1C5ED5,
0xD807AA98,
0x12835B01,
0x243185BE,
0x550C7DC3,
0x72BE5D74,
0x80DEB1FE,
0x9BDC06A7,
0xC19BF174,
0xE49B69C1,
0xEFBE4786,
0x0FC19DC6,
0x240CA1CC,
0x2DE92C6F,
0x4A7484AA,
0x5CB0A9DC,
0x76F988DA,
0x983E5152,
0xA831C66D,
0xB00327C8,
0xBF597FC7,
0xC6E00BF3,
0xD5A79147,
0x06CA6351,
0x14292967,
0x27B70A85,
0x2E1B2138,
0x4D2C6DFC,
0x53380D13,
0x650A7354,
0x766A0ABB,
0x81C2C92E,
0x92722C85,
0xA2BFE8A1,
0xA81A664B,
0xC24B8B70,
0xC76C51A3,
0xD192E819,
0xD6990624,
0xF40E3585,
0x106AA070,
0x19A4C116,
0x1E376C08,
0x2748774C,
0x34B0BCB5,
0x391C0CB3,
0x4ED8AA4A,
0x5B9CCA4F,
0x682E6FF3,
0x748F82EE,
0x78A5636F,
0x84C87814,
0x8CC70208,
0x90BEFFFA,
0xA4506CEB,
0xBEF9A3F7,
0xC67178F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
self.final_hash()
    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b'\x80' + (b'\x00' * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack('>Q', (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self) -> None:
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack('>16L', block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )

                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = ''.join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """
        Right-rotate a given unsigned 32-bit number by a certain amount of rotations
        """
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
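# Quick sanity check (illustrative) against the well-known SHA-256 test vector:
#
#   SHA256(b'abc').hash
#   # -> 'ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad'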
class SHA256HashTest(unittest.TestCase):
    """
    Test class for the SHA256 class. Inherits the TestCase class from unittest
    """

    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes('Test String', 'utf-8')
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main() -> None:
    """
    Provides the option to hash a string passed on the command line or the contents of a file.
    """
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-s',
        '--string',
        dest='input_string',
        default='Hello World!! Welcome to Cryptography',
        help='Hash the string',
    )
    parser.add_argument('-f', '--file', dest='input_file', help='Hash contents of a file')

    args = parser.parse_args()

    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, 'rb') as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, 'utf-8')

    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''shi-labs/dinat-mini-in1k-224''': '''https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json''',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = 'dinat'

    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act='gelu',
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
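# Minimal usage sketch (the argument values shown are the defaults above):
#
#   config = DinatConfig(embed_dim=64, depths=[3, 4, 6, 5])
#   config.hidden_size  # -> 512, i.e. 64 * 2 ** (4 - 1), the channel dim of the last stage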
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`, or its derivative when `deriv` is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the value found after the forward propagation training."""

    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100
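# Illustrative call (the result is stochastic because the initial weight is random):
# forward_propagation(32, 450_000) should converge to a value close to 32.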
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE__ = int(input('''Expected value: '''))
SCREAMING_SNAKE_CASE__ = int(input('''Number of propagations: '''))
print(forward_propagation(expected, number_propagations))
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves the multi-process interleaved print problem"""
    with open(__file__, 'r') as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ['LOCAL_RANK'])
torch.cuda.set_device(local_rank)
device = torch.device('cuda', local_rank)
hostname = socket.gethostname()
gpu = f'[{hostname}-{local_rank}]'
try:
# test distributed
dist.init_process_group('''nccl''')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f'{gpu} is OK (global rank: {rank}/{world_size})')
dist.barrier()
if rank == 0:
printflock(f'pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}')
except Exception:
printflock(f'{gpu} is broken')
raise
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained('google/mt5-small')
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')

        input_ids = tokenizer('Hello there', return_tensors='np').input_ids
        labels = tokenizer('Hi I am', return_tensors='np').input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    tensors_to_transpose = ('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value')

    var_map = (
        ('layer.', 'layer_'),
        ('word_embeddings.weight', 'word_embeddings'),
        ('position_embeddings.weight', 'position_embeddings'),
        ('token_type_embeddings.weight', 'token_type_embeddings'),
        ('.', '/'),
        ('LayerNorm/weight', 'LayerNorm/gamma'),
        ('LayerNorm/bias', 'LayerNorm/beta'),
        ('weight', 'kernel'),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f'bert/{name}'

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f'Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}')

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace('-', '_') + '.ckpt'))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', type=str, required=True, help='model name e.g. bert-base-uncased')
    parser.add_argument(
        '--cache_dir', type=str, default=None, required=False, help='Directory containing pytorch model'
    )
    parser.add_argument('--pytorch_model_path', type=str, required=True, help='/path/to/<pytorch-model-name>.bin')
    parser.add_argument('--tf_cache_dir', type=str, required=True, help='Directory in which to save tensorflow model')
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
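# Example invocation (illustrative; the script filename is assumed):
#
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_ckpt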
if __name__ == "__main__":
main()
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class RobertaConfig(PretrainedConfig):
    model_type = 'roberta'

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type='absolute',
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == 'multiple-choice':
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang='ro', tgt_lang='en', dataset='wmt16', save_dir=None) -> None:
    """Download a dataset using the datasets package and save it to the format expected by finetune.py."""
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError('run pip install datasets')
    pair = f'{src_lang}-{tgt_lang}'
    print(f'Converting {dataset}-{pair}')
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f'{dataset}-{pair}'
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f'Splitting {split} with {ds[split].num_rows} records')

        # to save to val.source, val.target like summary datasets
        fn = 'val' if split == 'validation' else split

        src_path = save_dir.joinpath(f'{fn}.source')
        tgt_path = save_dir.joinpath(f'{fn}.target')
        src_fp = src_path.open('w+')
        tgt_fp = tgt_path.open('w+')

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x['translation']
            src_fp.write(ex[src_lang] + '\n')
            tgt_fp.write(ex[tgt_lang] + '\n')

    print(f'Saved {dataset} dataset to {save_dir}')
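# Example invocation via the fire CLI below (illustrative; the script filename is assumed):
#
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en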
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '<unk>',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def longformer_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode('Hello world!', add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode('Hello world! cécé herlolip 418', add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096')

        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            'sequence builders', add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            'sequence builders', 'multi-sequence build', add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = 'Encode this sequence.'
        space_encoding = tokenizer.byte_encoder[' '.encode('utf-8')[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({'bos_token': '<s>'})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = '<mask>'
        tokenizer.add_special_tokens(
            {'mask_token': AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = 'Encode <mask> sequence'
        sequence_nospace = 'Encode <mask>sequence'

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = 'A, <mask> AllenNLP sentence.'
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['token_type_ids']), sum(tokens_p['token_type_ids']))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['attention_mask']) / len(tokens_r['attention_mask']),
                    sum(tokens_p['attention_mask']) / len(tokens_p['attention_mask']),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>']
                )
                self.assertSequenceEqual(
                    tokens_r_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>']
                )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state['add_prefix_space'], add_prefix_space)

            self.assertEqual(post_processor_state['add_prefix_space'], add_prefix_space)
            self.assertEqual(post_processor_state['trim_offsets'], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                text_of_1_token = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f'{text_of_1_token} {text_of_1_token}'

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f' {text}'

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Pipeline
    params = [
        'image_embeds',
        'negative_image_embeds',
    ]
    batch_params = ['image_embeds', 'negative_image_embeds']
    required_optional_params = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            'in_channels': 4,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'image',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_0, self.block_out_channels_0 * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule='linear',
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type='epsilon',
            thresholding=False,
        )

        components = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'guidance_scale': 4.0,
            'num_inference_steps': 2,
            'output_type': 'np',
        }
        return inputs
    def test_kandinsky(self):
        device = 'cpu'

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class KandinskyV22PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_text2img(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy'
        )

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Pipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-decoder', torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = 'red cat, 4k photo'

        generator = torch.Generator(device='cuda').manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt='',
        ).to_tuple()

        generator = torch.Generator(device='cuda').manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            output_type='np',
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ['tests.fixtures.files', 'tests.fixtures.hub', 'tests.fixtures.fsspec']
def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ['integration', 'unit']):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line('markers', 'torchaudio_latest: mark test to run with torchaudio>=0.12')


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / 'cache'
    test_hf_datasets_cache = test_hf_cache_home / 'datasets'
    test_hf_metrics_cache = test_hf_cache_home / 'metrics'
    test_hf_modules_cache = test_hf_cache_home / 'modules'
    monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE', str(test_hf_datasets_cache))
    monkeypatch.setattr('datasets.config.HF_METRICS_CACHE', str(test_hf_metrics_cache))
    monkeypatch.setattr('datasets.config.HF_MODULES_CACHE', str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / 'downloads'
    monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH', str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / 'downloads' / 'extracted'
    monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH', str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope='session')
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS', False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING', True)
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r'digital_image_processing/image_data/lena_small.jpg')
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open('digital_image_processing/image_data/lena_small.jpg') as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            '<PIL.Image.Image image mode=RGB size=100x100 at'
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread('digital_image_processing/image_data/lena_small.jpg', 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    res = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, res).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = 'digital_image_processing/image_data/lena_small.jpg'):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = 'digital_image_processing/image_data/lena_small.jpg'):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = 'digital_image_processing/image_data/lena.jpg'

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """DO NOT CHANGE. This function computes and logs the result metrics."""

    log_outputs = args.log_outputs
    dataset_id = '_'.join(args.dataset.split('/') + [args.config, args.split])

    # load metric
    wer = load_metric('wer')
    cer = load_metric('cer')

    # compute metrics
    wer_result = wer.compute(references=result['target'], predictions=result['prediction'])
    cer_result = cer.compute(references=result['target'], predictions=result['prediction'])

    # print & log results
    result_str = f'WER: {wer_result}\nCER: {cer_result}'
    print(result_str)

    with open(f'{dataset_id}_eval_results.txt', 'w') as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f'log_{dataset_id}_predictions.txt'
        target_file = f'log_{dataset_id}_targets.txt'

        with open(pred_file, 'w') as p, open(target_file, 'w') as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f'{i}' + '\n')
                p.write(batch['prediction'] + '\n')
                t.write(f'{i}' + '\n')
                t.write(batch['target'] + '\n')

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """DO ADAPT FOR YOUR USE CASE. This function normalizes the target text."""

    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, '', text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['\n\n', '\n', '   ', '  ']

    for t in token_sequences_to_ignore:
        text = ' '.join(text.split(t))

    return text
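# Worked example (illustrative): normalize_text('Hello, World!\nGood-bye.') lowercases the
# text, strips the ignored punctuation, and collapses the newline, yielding
# 'hello world goodbye'.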
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column('audio', Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('automatic-speech-recognition', model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch['audio']['array'], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch['prediction'] = prediction['text']
        batch['target'] = normalize_text(batch['sentence'])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
)
parser.add_argument(
'''--dataset''',
type=str,
required=True,
help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
)
parser.add_argument(
'''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
)
parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
parser.add_argument(
'''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
)
parser.add_argument(
'''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
)
parser.add_argument(
'''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
)
parser.add_argument(
'''--device''',
type=int,
default=None,
help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
main(args)
| 52
| 1
|
import string
from math import logaa
def A ( __UpperCamelCase , __UpperCamelCase ) -> int:
A__ = document.translate(
str.maketrans('' , '' , string.punctuation ) ).replace('\n' , '' )
A__ = document_without_punctuation.split(' ' ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def A ( __UpperCamelCase , __UpperCamelCase ) -> tuple[int, int]:
A__ = corpus.lower().translate(
str.maketrans('' , '' , string.punctuation ) ) # strip all punctuation and replace it with ''
A__ = corpus_without_punctuation.split('\n' )
A__ = term.lower()
return (len([doc for doc in docs if term in doc] ), len(__UpperCamelCase ))
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False ) -> float:
if smoothing:
if n == 0:
raise ValueError('log10(0) is undefined.' )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError('df must be > 0' )
elif n == 0:
raise ValueError('log10(0) is undefined.' )
return round(logaa(n / df ) , 3 )
def A ( __UpperCamelCase , __UpperCamelCase ) -> float:
return round(tf * idf , 3 )
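# Worked example (an illustrative addition): re-deriving the same quantities on
# a toy two-document corpus — tf = raw count of the term in one document,
# df = number of documents containing it, idf = round(log10(n / df), 3),
# tf-idf = round(tf * idf, 3). The corpus string is a made-up assumption.
def _tf_idf_demo():
    from math import log10
    corpus = 'the cat sat\nthe dog ran'
    docs = corpus.split('\n')
    tf = docs[0].split().count('cat')                # 1
    df = sum('cat' in doc.split() for doc in docs)   # 1
    idf = round(log10(len(docs) / df), 3)            # log10(2 / 1) = 0.301
    return round(tf * idf, 3)                        # 0.301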
| 52
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def A ( __UpperCamelCase ) -> YolosConfig:
A__ = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
A__ = 192
A__ = 768
A__ = 12
A__ = 3
A__ = [800, 1_333]
A__ = False
elif yolos_name == "yolos_s_dWr":
A__ = 330
A__ = 14
A__ = 6
A__ = 1_320
elif "yolos_s" in yolos_name:
A__ = 384
A__ = 1_536
A__ = 12
A__ = 6
elif "yolos_b" in yolos_name:
A__ = [800, 1_344]
A__ = 91
A__ = 'huggingface/label-files'
A__ = 'coco-detection-id2label.json'
A__ = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='dataset' ) , 'r' ) )
A__ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
return config
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False ) -> str:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
A__ = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
A__ = in_proj_weight[: config.hidden_size, :]
A__ = in_proj_bias[: config.hidden_size]
A__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ = in_proj_weight[-config.hidden_size :, :]
A__ = in_proj_bias[-config.hidden_size :]
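# Illustrative sketch (an addition): the slicing performed above, shown on a toy
# fused qkv weight of shape (3 * hidden, hidden). The hidden size is an
# assumption chosen only for readability.
def _split_qkv_demo():
    hidden = 4
    qkv_weight = torch.randn(3 * hidden, hidden)
    query = qkv_weight[:hidden, :]
    key = qkv_weight[hidden : hidden * 2, :]
    value = qkv_weight[-hidden:, :]
    assert query.shape == key.shape == value.shape == (hidden, hidden)
    return query, key, value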
def A ( __UpperCamelCase ) -> str:
if "backbone" in name:
A__ = name.replace('backbone' , 'vit' )
if "cls_token" in name:
A__ = name.replace('cls_token' , 'embeddings.cls_token' )
if "det_token" in name:
A__ = name.replace('det_token' , 'embeddings.detection_tokens' )
if "mid_pos_embed" in name:
A__ = name.replace('mid_pos_embed' , 'encoder.mid_position_embeddings' )
if "pos_embed" in name:
A__ = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
A__ = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "blocks" in name:
A__ = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
A__ = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
A__ = name.replace('attn' , 'attention.self' )
if "norm1" in name:
A__ = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
A__ = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
A__ = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
A__ = name.replace('mlp.fc2' , 'output.dense' )
if "class_embed" in name:
A__ = name.replace('class_embed' , 'class_labels_classifier' )
if "bbox_embed" in name:
A__ = name.replace('bbox_embed' , 'bbox_predictor' )
if "vit.norm" in name:
A__ = name.replace('vit.norm' , 'vit.layernorm' )
return name
def A ( __UpperCamelCase , __UpperCamelCase ) -> dict:
for key in orig_state_dict.copy().keys():
A__ = orig_state_dict.pop(__UpperCamelCase )
if "qkv" in key:
A__ = key.split('.' )
A__ = int(key_split[2] )
A__ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
A__ = val[:dim, :]
A__ = val[
dim : dim * 2, :
]
A__ = val[-dim:, :]
else:
A__ = val[:dim]
A__ = val[dim : dim * 2]
A__ = val[-dim:]
else:
A__ = val
return orig_state_dict
def A ( ) -> torch.Tensor:
A__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A__ = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False ) -> List[str]:
A__ = get_yolos_config(__UpperCamelCase )
# load original state_dict
A__ = torch.load(__UpperCamelCase , map_location='cpu' )['model']
# load 🤗 model
A__ = YolosForObjectDetection(__UpperCamelCase )
model.eval()
A__ = convert_state_dict(__UpperCamelCase , __UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
# Check outputs on an image, prepared by YolosImageProcessor
A__ = 800 if yolos_name != 'yolos_ti' else 512
A__ = YolosImageProcessor(format='coco_detection' , size=__UpperCamelCase )
A__ = image_processor(images=prepare_img() , return_tensors='pt' )
A__ = model(**__UpperCamelCase )
A__ , A__ = outputs.logits, outputs.pred_boxes
A__ , A__ = None, None
if yolos_name == "yolos_ti":
A__ = torch.tensor(
[[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
A__ = torch.tensor(
[[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
elif yolos_name == "yolos_s_200_pre":
A__ = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
A__ = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
elif yolos_name == "yolos_s_300_pre":
A__ = torch.tensor(
[[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
A__ = torch.tensor(
[[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
elif yolos_name == "yolos_s_dWr":
A__ = torch.tensor(
[[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
A__ = torch.tensor(
[[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
elif yolos_name == "yolos_base":
A__ = torch.tensor(
[[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
A__ = torch.tensor(
[[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
else:
raise ValueError(f'''Unknown yolos_name: {yolos_name}''' )
assert torch.allclose(logits[0, :3, :3] , __UpperCamelCase , atol=1E-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , __UpperCamelCase , atol=1E-4 )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(f'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCamelCase )
if push_to_hub:
A__ = {
'yolos_ti': 'yolos-tiny',
'yolos_s_200_pre': 'yolos-small',
'yolos_s_300_pre': 'yolos-small-300',
'yolos_s_dWr': 'yolos-small-dwr',
'yolos_base': 'yolos-base',
}
print('Pushing to the hub...' )
A__ = model_mapping[yolos_name]
image_processor.push_to_hub(__UpperCamelCase , organization='hustvl' )
model.push_to_hub(__UpperCamelCase , organization='hustvl' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 52
| 1
|
def A ( __UpperCamelCase ) -> str:
if isinstance(__UpperCamelCase , __UpperCamelCase ):
raise TypeError('\'float\' object cannot be interpreted as an integer' )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
raise TypeError('\'str\' object cannot be interpreted as an integer' )
if num == 0:
return "0b0"
A__ = False
if num < 0:
A__ = True
A__ = -num
A__ = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(__UpperCamelCase ) for e in binary )
return "0b" + "".join(str(__UpperCamelCase ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 52
|
from typing import TYPE_CHECKING
from ..utils import _LazyModule
SCREAMING_SNAKE_CASE__ = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
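# Conceptual sketch (an addition, independent of transformers' private
# _LazyModule): the same deferred-import behaviour can be expressed with
# PEP 562's module-level __getattr__, resolving a name to its submodule only on
# first access. Kept commented out so it does not shadow the real mechanism.
#
# import importlib
#
# def __getattr__(name):
#     for module, names in _import_structure.items():
#         if name in names:
#             return getattr(importlib.import_module(f'.{module}', __name__), name)
#     raise AttributeError(name)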
| 52
| 1
|
import math
import random
def A ( __UpperCamelCase , __UpperCamelCase = False ) -> float:
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
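# Numerical check (an illustrative addition): sigmoid'(x) = s(x) * (1 - s(x)).
# Note the deriv=True branch above expects the already-squashed value, so the
# analytic term below is fed s(x). The probe point and step size are assumptions.
def _sigmoid_derivative_check(x=0.3, h=1E-6):
    s = lambda v: 1 / (1 + math.exp(-v))
    numeric = (s(x + h) - s(x - h)) / (2 * h)
    analytic = s(x) * (1 - s(x))
    return abs(numeric - analytic)  # ~0, confirming the identity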
# Initial Value
SCREAMING_SNAKE_CASE__ = 0.02
def A ( __UpperCamelCase , __UpperCamelCase ) -> float:
A__ = float(2 * (random.randint(1 , 100 )) - 1 )
for _ in range(__UpperCamelCase ):
# Forward propagation
A__ = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
A__ = (expected / 100) - layer_a
# Error delta
A__ = layer_1_error * sigmoid_function(__UpperCamelCase , __UpperCamelCase )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE__ = int(input('''Expected value: '''))
SCREAMING_SNAKE_CASE__ = int(input('''Number of propagations: '''))
print(forward_propagation(expected, number_propagations))
| 52
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
SCREAMING_SNAKE_CASE__ = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
SCREAMING_SNAKE_CASE__ = {
'''google/rembert''': 2_5_6,
}
SCREAMING_SNAKE_CASE__ = '''▁'''
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : Any = VOCAB_FILES_NAMES
A__ : str = PRETRAINED_VOCAB_FILES_MAP
A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : int = RemBertTokenizer
def __init__( self : Union[str, Any] , _snake_case : Any=None , _snake_case : Optional[Any]=None , _snake_case : Any=True , _snake_case : Optional[int]=True , _snake_case : Dict=False , _snake_case : Dict="[CLS]" , _snake_case : List[Any]="[SEP]" , _snake_case : Union[str, Any]="<unk>" , _snake_case : List[str]="[SEP]" , _snake_case : List[str]="<pad>" , _snake_case : str="[CLS]" , _snake_case : Any="[MASK]" , **_snake_case : Any , ):
"""simple docstring"""
A__ = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else mask_token
super().__init__(
_snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , remove_space=_snake_case , keep_accents=_snake_case , bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , **_snake_case , )
A__ = do_lower_case
A__ = remove_space
A__ = keep_accents
A__ = vocab_file
A__ = False if not self.vocab_file else True
def _a ( self : Any , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ):
"""simple docstring"""
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _a ( self : Tuple , _snake_case : List[int] , _snake_case : Optional[List[int]] = None , _snake_case : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_snake_case )) + [1] + ([0] * len(_snake_case )) + [1]
return [1] + ([0] * len(_snake_case )) + [1]
def _a ( self : Dict , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ):
"""simple docstring"""
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
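    # Illustrative sketch (an addition, independent of the tokenizer internals):
    # the segment-id layout produced by the method above, using placeholder ids
    # (cls=101, sep=102 and the token ids are assumptions, not RemBERT's real ids).
    @staticmethod
    def _segment_ids_demo():
        cls_ids, sep_ids = [101], [102]
        ids_a, ids_b = [7, 8], [9]
        single = len(cls_ids + ids_a + sep_ids) * [0]      # [0, 0, 0, 0]
        pair = single + len(ids_b + sep_ids) * [1]         # [0, 0, 0, 0, 1, 1]
        return single, pair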
def _a ( self : Any , _snake_case : str , _snake_case : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(_snake_case ):
logger.error('Vocabulary path ({}) should be a directory'.format(_snake_case ) )
return
A__ = os.path.join(
_snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ):
copyfile(self.vocab_file , _snake_case )
return (out_vocab_file,)
| 52
| 1
|
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1 / sqrt(2 ) ) -> IIRFilter:
A__ = tau * frequency / samplerate
A__ = sin(__UpperCamelCase )
A__ = cos(__UpperCamelCase )
A__ = _sin / (2 * q_factor)
A__ = (1 - _cos) / 2
A__ = 1 - _cos
A__ = 1 + alpha
A__ = -2 * _cos
A__ = 1 - alpha
A__ = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
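# Sanity sketch (an addition): at DC (z = 1) a biquad's gain is
# (b0 + b1 + b2) / (a0 + a1 + a2); with the cookbook low-pass coefficients above
# both sums equal 2 * (1 - cos(w0)), so the DC gain is exactly 1. The sample
# frequency and sample rate below are assumptions for the demonstration.
def _lowpass_dc_gain(frequency=440, samplerate=48_000, q_factor=1 / sqrt(2)):
    w0 = tau * frequency / samplerate
    alpha = sin(w0) / (2 * q_factor)
    b_sum = (1 - cos(w0)) / 2 + (1 - cos(w0)) + (1 - cos(w0)) / 2
    a_sum = (1 + alpha) + (-2 * cos(w0)) + (1 - alpha)
    return b_sum / a_sum  # == 1.0 up to float rounding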
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1 / sqrt(2 ) ) -> IIRFilter:
A__ = tau * frequency / samplerate
A__ = sin(__UpperCamelCase )
A__ = cos(__UpperCamelCase )
A__ = _sin / (2 * q_factor)
A__ = (1 + _cos) / 2
A__ = -1 - _cos
A__ = 1 + alpha
A__ = -2 * _cos
A__ = 1 - alpha
A__ = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1 / sqrt(2 ) ) -> IIRFilter:
A__ = tau * frequency / samplerate
A__ = sin(__UpperCamelCase )
A__ = cos(__UpperCamelCase )
A__ = _sin / (2 * q_factor)
A__ = _sin / 2
A__ = 0
A__ = -ba
A__ = 1 + alpha
A__ = -2 * _cos
A__ = 1 - alpha
A__ = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1 / sqrt(2 ) ) -> IIRFilter:
A__ = tau * frequency / samplerate
A__ = sin(__UpperCamelCase )
A__ = cos(__UpperCamelCase )
A__ = _sin / (2 * q_factor)
A__ = 1 - alpha
A__ = -2 * _cos
A__ = 1 + alpha
A__ = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1 / sqrt(2 ) , ) -> IIRFilter:
A__ = tau * frequency / samplerate
A__ = sin(__UpperCamelCase )
A__ = cos(__UpperCamelCase )
A__ = _sin / (2 * q_factor)
A__ = 10 ** (gain_db / 40)
A__ = 1 + alpha * big_a
A__ = -2 * _cos
A__ = 1 - alpha * big_a
A__ = 1 + alpha / big_a
A__ = -2 * _cos
A__ = 1 - alpha / big_a
A__ = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1 / sqrt(2 ) , ) -> IIRFilter:
A__ = tau * frequency / samplerate
A__ = sin(__UpperCamelCase )
A__ = cos(__UpperCamelCase )
A__ = _sin / (2 * q_factor)
A__ = 10 ** (gain_db / 40)
A__ = (big_a + 1) - (big_a - 1) * _cos
A__ = (big_a + 1) + (big_a - 1) * _cos
A__ = (big_a - 1) - (big_a + 1) * _cos
A__ = (big_a - 1) + (big_a + 1) * _cos
A__ = 2 * sqrt(__UpperCamelCase ) * alpha
A__ = big_a * (pmc + aaa)
A__ = 2 * big_a * mpc
A__ = big_a * (pmc - aaa)
A__ = ppmc + aaa
A__ = -2 * pmpc
A__ = ppmc - aaa
A__ = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1 / sqrt(2 ) , ) -> IIRFilter:
A__ = tau * frequency / samplerate
A__ = sin(__UpperCamelCase )
A__ = cos(__UpperCamelCase )
A__ = _sin / (2 * q_factor)
A__ = 10 ** (gain_db / 40)
A__ = (big_a + 1) - (big_a - 1) * _cos
A__ = (big_a + 1) + (big_a - 1) * _cos
A__ = (big_a - 1) - (big_a + 1) * _cos
A__ = (big_a - 1) + (big_a + 1) * _cos
A__ = 2 * sqrt(__UpperCamelCase ) * alpha
A__ = big_a * (ppmc + aaa)
A__ = -2 * big_a * pmpc
A__ = big_a * (ppmc - aaa)
A__ = pmc + aaa
A__ = 2 * mpc
A__ = pmc - aaa
A__ = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
| 52
|
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
SCREAMING_SNAKE_CASE__ = '''sshleifer/bart-tiny-random'''
SCREAMING_SNAKE_CASE__ = '''patrickvonplaten/t5-tiny-random'''
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : Optional[int] ):
"""simple docstring"""
return AutoConfig.from_pretrained(_snake_case )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=_snake_case )
def _a ( self : int ):
"""simple docstring"""
A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=_snake_case )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def _a ( self : str ):
"""simple docstring"""
A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def _a ( self : str ):
"""simple docstring"""
with self.assertRaises(_snake_case ):
create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=_snake_case , d=_snake_case )
| 52
| 1
|
from __future__ import annotations
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> None:
A__ = len(__UpperCamelCase )
    # If row is equal to the size of the board, it means there is a queen in each
    # row of the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
for col in range(__UpperCamelCase ):
        # We apply what we learned previously. First we check that the current board
        # (possible_board) does not already contain this column value, because a
        # repeated value would mean a vertical collision. Then we apply the two
        # formulas we learned before:
        #
        # 45º: y - x = b or 45º: row - col = b
        # 135º: y + x = b or 135º: row + col = b
        #
        # and we verify that the results of these two formulas do not already appear
        # in their respective collision sets (diagonal_right_collisions,
        # diagonal_left_collisions).
        #
        # If either check matches there is a collision, so we continue to the
        # next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
        # Otherwise there is no collision, so we call depth_first_search again with the updated inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , __UpperCamelCase , __UpperCamelCase , )
def A ( __UpperCamelCase ) -> None:
A__ = []
depth_first_search([] , [] , [] , __UpperCamelCase , __UpperCamelCase )
# Print all the boards
for board in boards:
for column in board:
print(__UpperCamelCase )
print('' )
print(len(__UpperCamelCase ) , 'solutions were found.' )
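# Compact sanity sketch (an addition): counting solutions with the same
# column / (row - col) / (row + col) collision sets as above. Known totals:
# n=4 -> 2, n=5 -> 10, n=6 -> 4, n=8 -> 92.
def _count_n_queens(n):
    def dfs(row, cols, right, left):
        if row == n:
            return 1
        total = 0
        for col in range(n):
            if col in cols or row - col in right or row + col in left:
                continue
            total += dfs(row + 1, cols | {col}, right | {row - col}, left | {row + col})
        return total
    return dfs(0, set(), set(), set())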
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 52
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : Union[str, Any] = ["image_processor", "tokenizer"]
A__ : Optional[Any] = "BridgeTowerImageProcessor"
A__ : List[Any] = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self : List[Any] , _snake_case : Optional[Any] , _snake_case : Optional[int] ):
"""simple docstring"""
super().__init__(_snake_case , _snake_case )
def __call__( self : List[Any] , _snake_case : int , _snake_case : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _snake_case : bool = True , _snake_case : Union[bool, str, PaddingStrategy] = False , _snake_case : Union[bool, str, TruncationStrategy] = None , _snake_case : Optional[int] = None , _snake_case : int = 0 , _snake_case : Optional[int] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[bool] = None , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = True , _snake_case : Optional[Union[str, TensorType]] = None , **_snake_case : Optional[int] , ):
"""simple docstring"""
A__ = self.tokenizer(
text=_snake_case , add_special_tokens=_snake_case , padding=_snake_case , truncation=_snake_case , max_length=_snake_case , stride=_snake_case , pad_to_multiple_of=_snake_case , return_token_type_ids=_snake_case , return_attention_mask=_snake_case , return_overflowing_tokens=_snake_case , return_special_tokens_mask=_snake_case , return_offsets_mapping=_snake_case , return_length=_snake_case , verbose=_snake_case , return_tensors=_snake_case , **_snake_case , )
# add pixel_values + pixel_mask
A__ = self.image_processor(
_snake_case , return_tensors=_snake_case , do_normalize=_snake_case , do_center_crop=_snake_case , **_snake_case )
encoding.update(_snake_case )
return encoding
def _a ( self : Any , *_snake_case : Tuple , **_snake_case : List[Any] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*_snake_case , **_snake_case )
def _a ( self : Dict , *_snake_case : Dict , **_snake_case : List[str] ):
"""simple docstring"""
return self.tokenizer.decode(*_snake_case , **_snake_case )
@property
def _a ( self : Tuple ):
"""simple docstring"""
A__ = self.tokenizer.model_input_names
A__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 52
| 1
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 52
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ['''XLMRobertaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ['''XLMRobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 52
| 1
|
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
SCREAMING_SNAKE_CASE__ = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None ) -> List[Any]:
# Initialise PyTorch model
A__ = XLNetConfig.from_json_file(__UpperCamelCase )
A__ = finetuning_task.lower() if finetuning_task is not None else ''
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''' )
A__ = finetuning_task
A__ = GLUE_TASKS_NUM_LABELS[finetuning_task]
A__ = XLNetForSequenceClassification(__UpperCamelCase )
elif "squad" in finetuning_task:
A__ = finetuning_task
A__ = XLNetForQuestionAnswering(__UpperCamelCase )
else:
A__ = XLNetLMHeadModel(__UpperCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Save pytorch-model
A__ = os.path.join(__UpperCamelCase , __UpperCamelCase )
A__ = os.path.join(__UpperCamelCase , __UpperCamelCase )
print(f'''Save PyTorch model to {os.path.abspath(__UpperCamelCase )}''' )
torch.save(model.state_dict() , __UpperCamelCase )
print(f'''Save configuration file to {os.path.abspath(__UpperCamelCase )}''' )
with open(__UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 52
|
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def A ( __UpperCamelCase ) -> Tuple:
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[Any]:
return max(metric_fn(__UpperCamelCase , __UpperCamelCase ) for gt in ground_truths )
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[str]:
A__ = [line.strip() for line in open(__UpperCamelCase , 'r' ).readlines()]
A__ = []
if args.gold_data_mode == "qa":
A__ = pd.read_csv(__UpperCamelCase , sep='\t' , header=__UpperCamelCase )
for answer_list in data[1]:
A__ = ast.literal_eval(__UpperCamelCase )
answers.append(__UpperCamelCase )
else:
A__ = [line.strip() for line in open(__UpperCamelCase , 'r' ).readlines()]
A__ = [[reference] for reference in references]
A__ = A__ = A__ = 0
for prediction, ground_truths in zip(__UpperCamelCase , __UpperCamelCase ):
total += 1
em += metric_max_over_ground_truths(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
fa += metric_max_over_ground_truths(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
A__ = 100.0 * em / total
A__ = 100.0 * fa / total
logger.info(f'''F1: {fa:.2f}''' )
logger.info(f'''EM: {em:.2f}''' )
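# Hedged sketch (an illustrative addition): SQuAD-style exact match and token
# F1, the kind of metric_fn fed through metric_max_over_ground_truths above.
# The normalization here is deliberately simplified relative to utils_rag.
def _exact_match(prediction, ground_truth):
    return float(prediction.strip().lower() == ground_truth.strip().lower())
def _token_f1(prediction, ground_truth):
    pred, gold = prediction.lower().split(), ground_truth.lower().split()
    common = sum(min(pred.count(t), gold.count(t)) for t in set(pred))
    if common == 0:
        return 0.0
    precision, recall = common / len(pred), common / len(gold)
    return 2 * precision * recall / (precision + recall)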
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[int]:
A__ = args.k
A__ = [line.strip() for line in open(__UpperCamelCase , 'r' ).readlines()]
A__ = [line.strip() for line in open(__UpperCamelCase , 'r' ).readlines()]
A__ = A__ = 0
for hypo, reference in zip(__UpperCamelCase , __UpperCamelCase ):
A__ = set(hypo.split('\t' )[:k] )
A__ = set(reference.split('\t' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
A__ = 100.0 * em / total
logger.info(f'''Precision@{k}: {em: .2f}''' )
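# Toy illustration (an addition): precision@k exactly as computed above, on
# made-up tab-separated provenance strings.
def _precision_at_k_demo(k=2):
    hypo = 'Doc A\tDoc B\tDoc C'
    reference = 'Doc B\tDoc D'
    hypo_top_k = set(hypo.split('\t')[:k])   # {'Doc A', 'Doc B'}
    ref_set = set(reference.split('\t'))     # {'Doc B', 'Doc D'}
    return len(hypo_top_k & ref_set) / k     # 1 / 2 = 0.5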
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]:
def strip_title(__UpperCamelCase ):
if title.startswith('"' ):
A__ = title[1:]
if title.endswith('"' ):
A__ = title[:-1]
return title
A__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
__UpperCamelCase , return_tensors='pt' , padding=__UpperCamelCase , truncation=__UpperCamelCase , )['input_ids'].to(args.device )
A__ = rag_model.rag.question_encoder(__UpperCamelCase )
A__ = question_enc_outputs[0]
A__ = rag_model.retriever(
__UpperCamelCase , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='pt' , )
A__ = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
A__ = []
for docs in all_docs:
A__ = [strip_title(__UpperCamelCase ) for title in docs['title']]
provenance_strings.append('\t'.join(__UpperCamelCase ) )
return provenance_strings
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]:
with torch.no_grad():
A__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
__UpperCamelCase , return_tensors='pt' , padding=__UpperCamelCase , truncation=__UpperCamelCase )
A__ = inputs_dict.input_ids.to(args.device )
A__ = inputs_dict.attention_mask.to(args.device )
A__ = rag_model.generate( # rag_model overwrites generate
__UpperCamelCase , attention_mask=__UpperCamelCase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__UpperCamelCase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
A__ = rag_model.retriever.generator_tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
if args.print_predictions:
for q, a in zip(__UpperCamelCase , __UpperCamelCase ):
logger.info('Q: {} - A: {}'.format(__UpperCamelCase , __UpperCamelCase ) )
return answers
def A ( ) -> Any:
A__ = argparse.ArgumentParser()
parser.add_argument(
'--model_type' , choices=['rag_sequence', 'rag_token', 'bart'] , type=__UpperCamelCase , help=(
'RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'
' model_name_or_path'
) , )
parser.add_argument(
'--index_name' , default=__UpperCamelCase , choices=['exact', 'compressed', 'legacy'] , type=__UpperCamelCase , help='RAG model retriever type' , )
parser.add_argument(
'--index_path' , default=__UpperCamelCase , type=__UpperCamelCase , help='Path to the retrieval index' , )
parser.add_argument('--n_docs' , default=5 , type=__UpperCamelCase , help='Number of retrieved docs' )
parser.add_argument(
'--model_name_or_path' , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help='Path to pretrained checkpoints or model identifier from huggingface.co/models' , )
parser.add_argument(
'--eval_mode' , choices=['e2e', 'retrieval'] , default='e2e' , type=__UpperCamelCase , help=(
'Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'
' precision@k.'
) , )
parser.add_argument('--k' , default=1 , type=__UpperCamelCase , help='k for the precision@k calculation' )
parser.add_argument(
'--evaluation_set' , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help='Path to a file containing evaluation samples' , )
parser.add_argument(
'--gold_data_path' , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help='Path to a tab-separated file with gold samples' , )
parser.add_argument(
'--gold_data_mode' , default='qa' , type=__UpperCamelCase , choices=['qa', 'ans'] , help=(
            'Format of the gold data file. '
            'qa - a single line in the following format: question [tab] answer_list. '
            'ans - a single line of the gold file contains the expected answer string.'
) , )
parser.add_argument(
'--predictions_path' , type=__UpperCamelCase , default='predictions.txt' , help='Name of the predictions file, to be stored in the checkpoints directory' , )
parser.add_argument(
        '--eval_all_checkpoints' , action='store_true' , help='Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number' , )
parser.add_argument(
'--eval_batch_size' , default=8 , type=__UpperCamelCase , help='Batch size per GPU/CPU for evaluation.' , )
parser.add_argument(
'--recalculate' , help='Recalculate predictions even if the prediction file exists' , action='store_true' , )
parser.add_argument(
'--num_beams' , default=4 , type=__UpperCamelCase , help='Number of beams to be used when generating answers' , )
parser.add_argument('--min_length' , default=1 , type=__UpperCamelCase , help='Min length of the generated answers' )
parser.add_argument('--max_length' , default=50 , type=__UpperCamelCase , help='Max length of the generated answers' )
parser.add_argument(
'--print_predictions' , action='store_true' , help='If True, prints predictions while evaluating.' , )
parser.add_argument(
        '--print_docs' , action='store_true' , help='If True, prints docs retrieved while generating.' , )
A__ = parser.parse_args()
A__ = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
return args
def A ( __UpperCamelCase ) -> int:
A__ = {}
if args.model_type is None:
A__ = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('rag' ):
A__ = RagTokenForGeneration if args.model_type == 'rag_token' else RagSequenceForGeneration
A__ = args.n_docs
if args.index_name is not None:
A__ = args.index_name
if args.index_path is not None:
A__ = args.index_path
else:
A__ = BartForConditionalGeneration
A__ = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('Evaluate the following checkpoints: %s' , __UpperCamelCase )
A__ = get_scores if args.eval_mode == 'e2e' else get_precision_at_k
A__ = evaluate_batch_eae if args.eval_mode == 'e2e' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path ) )
score_fn(__UpperCamelCase , args.predictions_path , args.gold_data_path )
continue
logger.info('***** Running evaluation for {} *****'.format(__UpperCamelCase ) )
logger.info(' Batch size = %d' , args.eval_batch_size )
logger.info(' Predictions will be stored under {}'.format(args.predictions_path ) )
if args.model_type.startswith('rag' ):
A__ = RagRetriever.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
A__ = model_class.from_pretrained(__UpperCamelCase , retriever=__UpperCamelCase , **__UpperCamelCase )
model.retriever.init_retrieval()
else:
A__ = model_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
model.to(args.device )
with open(args.evaluation_set , 'r' ) as eval_file, open(args.predictions_path , 'w' ) as preds_file:
A__ = []
for line in tqdm(__UpperCamelCase ):
questions.append(line.strip() )
if len(__UpperCamelCase ) == args.eval_batch_size:
A__ = evaluate_batch_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
preds_file.write('\n'.join(__UpperCamelCase ) + '\n' )
preds_file.flush()
A__ = []
if len(__UpperCamelCase ) > 0:
A__ = evaluate_batch_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
preds_file.write('\n'.join(__UpperCamelCase ) )
preds_file.flush()
score_fn(__UpperCamelCase , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = get_args()
main(args)
| 52
| 1
|
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class __lowerCAmelCase :
"""simple docstring"""
A__ : Dict = PegasusConfig
A__ : List[Any] = {}
A__ : int = "gelu"
def __init__( self : Dict , _snake_case : Optional[Any] , _snake_case : Dict=13 , _snake_case : Union[str, Any]=7 , _snake_case : List[str]=True , _snake_case : List[str]=False , _snake_case : Any=99 , _snake_case : List[Any]=32 , _snake_case : List[Any]=2 , _snake_case : Tuple=4 , _snake_case : Union[str, Any]=37 , _snake_case : Optional[Any]=0.1 , _snake_case : Tuple=0.1 , _snake_case : Optional[int]=40 , _snake_case : Optional[Any]=2 , _snake_case : Tuple=1 , _snake_case : int=0 , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = eos_token_id
A__ = pad_token_id
A__ = bos_token_id
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
A__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
A__ = tf.concat([input_ids, eos_tensor] , axis=1 )
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
A__ = prepare_pegasus_inputs_dict(_snake_case , _snake_case , _snake_case )
return config, inputs_dict
def _a ( self : Optional[Any] , _snake_case : Union[str, Any] , _snake_case : List[str] ):
"""simple docstring"""
A__ = TFPegasusModel(config=_snake_case ).get_decoder()
A__ = inputs_dict['input_ids']
A__ = input_ids[:1, :]
A__ = inputs_dict['attention_mask'][:1, :]
A__ = inputs_dict['head_mask']
A__ = 1
# first forward pass
A__ = model(_snake_case , attention_mask=_snake_case , head_mask=_snake_case , use_cache=_snake_case )
A__ , A__ = outputs.to_tuple()
        # create hypothetical next tokens and extend to next_input_ids
A__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append the new tokens to input_ids and attention_mask
A__ = tf.concat([input_ids, next_tokens] , axis=-1 )
A__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
A__ = model(_snake_case , attention_mask=_snake_case )[0]
A__ = model(_snake_case , attention_mask=_snake_case , past_key_values=_snake_case )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
A__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
A__ = output_from_no_past[:, -3:, random_slice_idx]
A__ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_snake_case , _snake_case , rtol=1E-3 )
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , ) -> Tuple:
if attention_mask is None:
A__ = tf.cast(tf.math.not_equal(__UpperCamelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
A__ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
A__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Union[str, Any] = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
A__ : Any = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
A__ : Optional[int] = (
{
"conversational": TFPegasusForConditionalGeneration,
"feature-extraction": TFPegasusModel,
"summarization": TFPegasusForConditionalGeneration,
"text2text-generation": TFPegasusForConditionalGeneration,
"translation": TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
A__ : Optional[Any] = True
A__ : Optional[Any] = False
A__ : Union[str, Any] = False
def _a ( self : str ):
"""simple docstring"""
A__ = TFPegasusModelTester(self )
A__ = ConfigTester(self , config_class=_snake_case )
def _a ( self : int ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_snake_case )
@require_sentencepiece
@require_tokenizers
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
A__ : Optional[int] = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
A__ : Union[str, Any] = [
"California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
" reduce the risk of wildfires.",
"N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
A__ : Optional[int] = "google/pegasus-xsum"
@cached_property
def _a ( self : Tuple ):
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def _a ( self : Tuple ):
"""simple docstring"""
A__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def _a ( self : Union[str, Any] , **_snake_case : Optional[int] ):
"""simple docstring"""
A__ = self.translate_src_text(**_snake_case )
assert self.expected_text == generated_words
def _a ( self : Any , **_snake_case : Any ):
"""simple docstring"""
A__ = self.tokenizer(self.src_text , **_snake_case , padding=_snake_case , return_tensors='tf' )
A__ = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=_snake_case , )
A__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_snake_case )
return generated_words
@slow
def _a ( self : Union[str, Any] ):
"""simple docstring"""
self._assert_generated_batch_equal_expected()
| 52
|
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] , _snake_case : Any , _snake_case : Optional[int]=13 , _snake_case : Optional[Any]=64 , _snake_case : List[str]=2 , _snake_case : Any=3 , _snake_case : Union[str, Any]=True , _snake_case : Dict=True , _snake_case : int=32 , _snake_case : int=5 , _snake_case : Union[str, Any]=4 , _snake_case : int=37 , _snake_case : Tuple="gelu" , _snake_case : Optional[int]=0.1 , _snake_case : Dict=0.1 , _snake_case : List[str]=10 , _snake_case : Union[str, Any]=0.02 , _snake_case : Dict=[1, 16, 4, 4] , _snake_case : Dict=None , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = image_size
A__ = patch_size
A__ = num_channels
A__ = is_training
A__ = use_labels
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = type_sequence_label_size
A__ = initializer_range
A__ = scope
A__ = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
A__ = (self.image_size // 32) ** 2
A__ = num_patches + 1
def _a ( self : Any ):
"""simple docstring"""
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = self.get_config()
return config, pixel_values, labels
def _a ( self : Tuple ):
"""simple docstring"""
A__ = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_snake_case , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=_snake_case , )
def _a ( self : int , _snake_case : Optional[int] , _snake_case : Union[str, Any] , _snake_case : Optional[int] ):
"""simple docstring"""
A__ = ViTHybridModel(config=_snake_case )
model.to(_snake_case )
model.eval()
A__ = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : List[str] , _snake_case : str , _snake_case : Union[str, Any] , _snake_case : Any ):
"""simple docstring"""
A__ = self.type_sequence_label_size
A__ = ViTHybridForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
A__ = model(_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self : Dict ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Union[str, Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
A__ : str = (
{"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
A__ : Union[str, Any] = False
A__ : Any = False
A__ : Union[str, Any] = False
def _a ( self : Dict ):
"""simple docstring"""
A__ = ViTHybridModelTester(self )
A__ = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37 )
def _a ( self : int ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def _a ( self : int ):
"""simple docstring"""
pass
def _a ( self : int ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(_snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case , nn.Linear ) )
def _a ( self : List[str] ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(_snake_case )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _snake_case )
def _a ( self : Any ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def _a ( self : str ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
def _a ( self : Any ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = _config_zero_init(_snake_case )
for model_class in self.all_model_classes:
A__ = model_class(config=_snake_case )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
A__ = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def _a ( self : int ):
"""simple docstring"""
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = ViTHybridModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def A ( ) -> Union[str, Any]:
A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : Tuple ):
"""simple docstring"""
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_snake_case )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=_snake_case , return_tensors='pt' ).to(_snake_case )
# forward pass
with torch.no_grad():
A__ = model(**_snake_case )
# verify the logits
A__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _snake_case )
A__ = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1E-4 ) )
@slow
@require_accelerate
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
A__ = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
A__ = prepare_img()
A__ = image_processor(images=_snake_case , return_tensors='pt' )
A__ = model(**_snake_case )
A__ = outputs.logits
# model predicts one of the 1000 ImageNet classes
A__ = logits.argmax(-1 ).item()
        self.assertEqual(model.config.idalabel[predicted_class_idx] , 'tabby, tabby cat' )
from __future__ import annotations
from random import choice
def A ( __UpperCamelCase ) -> Optional[Any]:
return choice(__UpperCamelCase )
def A ( __UpperCamelCase , __UpperCamelCase ) -> int:
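    # quickselect with a random pivot: returns the kth smallest element of a list
    # of distinct values in expected linear time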
A__ = random_pivot(__UpperCamelCase )
# partition based on pivot
# linear time
A__ = [e for e in lst if e < pivot]
A__ = [e for e in lst if e > pivot]
    # if we get lucky, the pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than the pivot)
    # + pivot (the candidate kth element)
    # + big (elements larger than the pivot)
if len(__UpperCamelCase ) == k - 1:
return pivot
    # the kth element is among the elements bigger than the pivot
elif len(__UpperCamelCase ) < k - 1:
return kth_number(__UpperCamelCase , k - len(__UpperCamelCase ) - 1 )
    # the kth element is among the elements smaller than the pivot
else:
return kth_number(__UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
def A ( __UpperCamelCase ) -> bool:
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : Dict , _snake_case : Tuple , _snake_case : Optional[Any]=13 , _snake_case : str=32 , _snake_case : Tuple=3 , _snake_case : int=4 , _snake_case : Any=[10, 20, 30, 40] , _snake_case : Optional[int]=[2, 2, 3, 2] , _snake_case : Optional[int]=True , _snake_case : List[str]=True , _snake_case : Any=37 , _snake_case : Tuple="gelu" , _snake_case : Optional[int]=10 , _snake_case : Optional[int]=0.02 , _snake_case : List[str]=["stage2", "stage3", "stage4"] , _snake_case : Union[str, Any]=[2, 3, 4] , _snake_case : List[str]=None , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = image_size
A__ = num_channels
A__ = num_stages
A__ = hidden_sizes
A__ = depths
A__ = is_training
A__ = use_labels
A__ = intermediate_size
A__ = hidden_act
A__ = num_labels
A__ = initializer_range
A__ = out_features
A__ = out_indices
A__ = scope
def _a ( self : Tuple ):
"""simple docstring"""
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.num_labels )
A__ = self.get_config()
return config, pixel_values, labels
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_snake_case , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _a ( self : List[str] , _snake_case : List[str] , _snake_case : Optional[Any] , _snake_case : Optional[int] ):
"""simple docstring"""
A__ = ConvNextVaModel(config=_snake_case )
model.to(_snake_case )
model.eval()
A__ = model(_snake_case )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _a ( self : int , _snake_case : Optional[int] , _snake_case : Tuple , _snake_case : Any ):
"""simple docstring"""
A__ = ConvNextVaForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
A__ = model(_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self : Optional[int] , _snake_case : Optional[Any] , _snake_case : List[Any] , _snake_case : Any ):
"""simple docstring"""
A__ = ConvNextVaBackbone(config=_snake_case )
model.to(_snake_case )
model.eval()
A__ = model(_snake_case )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
A__ = None
A__ = ConvNextVaBackbone(config=_snake_case )
model.to(_snake_case )
model.eval()
A__ = model(_snake_case )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _a ( self : Tuple ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'pixel_values': pixel_values}
return config, inputs_dict
def _a ( self : Tuple ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'pixel_values': pixel_values, 'labels': labels}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Dict = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
A__ : List[str] = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
A__ : Optional[int] = False
A__ : Union[str, Any] = False
A__ : Tuple = False
A__ : Dict = False
A__ : str = False
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = ConvNextVaModelTester(self )
A__ = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37 )
def _a ( self : List[str] ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self : str ):
"""simple docstring"""
return
@unittest.skip(reason='ConvNextV2 does not use inputs_embeds' )
def _a ( self : Tuple ):
"""simple docstring"""
pass
@unittest.skip(reason='ConvNextV2 does not support input and output embeddings' )
def _a ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip(reason='ConvNextV2 does not use feedforward chunking' )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
pass
def _a ( self : str ):
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
A__ , A__ = self.model_tester.prepare_config_and_inputs_with_labels()
A__ = True
if model_class.__name__ in [
*get_values(_snake_case ),
*get_values(_snake_case ),
]:
continue
A__ = model_class(_snake_case )
model.to(_snake_case )
model.train()
A__ = self._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case )
A__ = model(**_snake_case ).loss
loss.backward()
def _a ( self : Optional[int] ):
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
A__ , A__ = self.model_tester.prepare_config_and_inputs_with_labels()
A__ = False
A__ = True
if (
model_class.__name__
in [*get_values(_snake_case ), *get_values(_snake_case )]
or not model_class.supports_gradient_checkpointing
):
continue
A__ = model_class(_snake_case )
model.to(_snake_case )
model.gradient_checkpointing_enable()
model.train()
A__ = self._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case )
A__ = model(**_snake_case ).loss
loss.backward()
def _a ( self : Any ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(_snake_case )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _snake_case )
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def _a ( self : str ):
"""simple docstring"""
def check_hidden_states_output(_snake_case : Tuple , _snake_case : List[Any] , _snake_case : Optional[Any] ):
A__ = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(_snake_case , _snake_case ) )
A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A__ = self.model_tester.num_stages
self.assertEqual(len(_snake_case ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def _a ( self : int ):
"""simple docstring"""
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = ConvNextVaModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def A ( ) -> Any:
A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : Tuple ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224' ) if is_vision_available() else None
@slow
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224' ).to(_snake_case )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = preprocessor(images=_snake_case , return_tensors='pt' ).to(_snake_case )
# forward pass
with torch.no_grad():
A__ = model(**_snake_case )
# verify the logits
A__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _snake_case )
A__ = torch.tensor([0.9996, 0.1966, -0.4386] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1E-4 ) )
from typing import Dict
from .base import GenericTensor, Pipeline
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def _a ( self : Any , _snake_case : str=None , _snake_case : Dict=None , _snake_case : Any=None , **_snake_case : str ):
"""simple docstring"""
if tokenize_kwargs is None:
A__ = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' )
A__ = truncation
A__ = tokenize_kwargs
A__ = {}
if return_tensors is not None:
A__ = return_tensors
return preprocess_params, {}, postprocess_params
def _a ( self : Any , _snake_case : Dict , **_snake_case : Optional[Any] ):
"""simple docstring"""
A__ = self.framework
A__ = self.tokenizer(_snake_case , return_tensors=_snake_case , **_snake_case )
return model_inputs
def _a ( self : List[Any] , _snake_case : Dict ):
"""simple docstring"""
A__ = self.model(**_snake_case )
return model_outputs
def _a ( self : Optional[Any] , _snake_case : List[Any] , _snake_case : str=False ):
"""simple docstring"""
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self : Dict , *_snake_case : int , **_snake_case : List[str] ):
"""simple docstring"""
return super().__call__(*_snake_case , **_snake_case )
import argparse
import struct
import unittest
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[str] , _snake_case : bytes ):
"""simple docstring"""
A__ = data
# Initialize hash values
A__ = [
0x6A09E667,
0xBB67AE85,
0x3C6EF372,
0xA54FF53A,
0x510E527F,
0x9B05688C,
0x1F83D9AB,
0x5BE0CD19,
]
# Initialize round constants
A__ = [
0x428A2F98,
0x71374491,
0xB5C0FBCF,
0xE9B5DBA5,
0x3956C25B,
0x59F111F1,
0x923F82A4,
0xAB1C5ED5,
0xD807AA98,
0x12835B01,
0x243185BE,
0x550C7DC3,
0x72BE5D74,
0x80DEB1FE,
0x9BDC06A7,
0xC19BF174,
0xE49B69C1,
0xEFBE4786,
0x0FC19DC6,
0x240CA1CC,
0x2DE92C6F,
0x4A7484AA,
0x5CB0A9DC,
0x76F988DA,
0x983E5152,
0xA831C66D,
0xB00327C8,
0xBF597FC7,
0xC6E00BF3,
0xD5A79147,
0x06CA6351,
0x14292967,
0x27B70A85,
0x2E1B2138,
0x4D2C6DFC,
0x53380D13,
0x650A7354,
0x766A0ABB,
0x81C2C92E,
0x92722C85,
0xA2BFE8A1,
0xA81A664B,
0xC24B8B70,
0xC76C51A3,
0xD192E819,
0xD6990624,
0xF40E3585,
0x106AA070,
0x19A4C116,
0x1E376C08,
0x2748774C,
0x34B0BCB5,
0x391C0CB3,
0x4ED8AA4A,
0x5B9CCA4F,
0x682E6FF3,
0x748F82EE,
0x78A5636F,
0x84C87814,
0x8CC70208,
0x90BEFFFA,
0xA4506CEB,
0xBEF9A3F7,
0xC67178F2,
]
A__ = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def _a ( _snake_case : bytes ):
"""simple docstring"""
A__ = B'\x80' + (B'\x00' * (63 - (len(_snake_case ) + 8) % 64))
A__ = struct.pack('>Q' , (len(_snake_case ) * 8) )
return data + padding + big_endian_integer
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
A__ = list(struct.unpack('>16L' , _snake_case ) )
            # extend with 48 zeroed words; the message schedule holds 64 words total
words += [0] * 48
A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
A__ = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
A__ = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
A__ = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x100000000
# Compression
A__ = self.ror(_snake_case , 6 ) ^ self.ror(_snake_case , 11 ) ^ self.ror(_snake_case , 25 )
A__ = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
A__ = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x100000000
A__ = self.ror(_snake_case , 2 ) ^ self.ror(_snake_case , 13 ) ^ self.ror(_snake_case , 22 )
A__ = (a & b) ^ (a & c) ^ (b & c)
A__ = (sa + maj) % 0x100000000
A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ = (
g,
f,
e,
((d + tempa) % 0x100000000),
c,
b,
a,
((tempa + tempa) % 0x100000000),
)
A__ = [a, b, c, d, e, f, g, h]
# Modify final values
A__ = [
((element + mutated_hash_values[index]) % 0x100000000)
for index, element in enumerate(self.hashes )
]
A__ = ''.join([hex(_snake_case )[2:].zfill(8 ) for value in self.hashes] )
def _a ( self : Dict , _snake_case : int , _snake_case : int ):
"""simple docstring"""
return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : str ):
"""simple docstring"""
import hashlib
A__ = bytes('Test String' , 'utf-8' )
self.assertEqual(SHAaaa(_snake_case ).hash , hashlib.shaaaa(_snake_case ).hexdigest() )
def A ( ) -> None:
import doctest
doctest.testmod()
A__ = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
A__ = parser.parse_args()
A__ = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
A__ = f.read()
else:
A__ = bytes(__UpperCamelCase , 'utf-8' )
print(SHAaaa(__UpperCamelCase ).hash )
if __name__ == "__main__":
main()
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
def A ( __UpperCamelCase , __UpperCamelCase ) -> List[Any]:
return (preds == labels).mean()
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
A__ : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
A__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
A__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
A__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
A__ : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
A__ : str = field(metadata={"help": "Should contain the data files for the task."} )
A__ : int = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
A__ : bool = field(
default=UpperCAmelCase_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def A ( ) -> Any:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
A__ , A__ , A__ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , __UpperCamelCase )
# Set seed
set_seed(training_args.seed )
try:
A__ = processors[data_args.task_name]()
A__ = processor.get_labels()
A__ = len(__UpperCamelCase )
except KeyError:
raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__UpperCamelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
A__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
A__ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__UpperCamelCase , cache_dir=model_args.cache_dir , )
# Get datasets
A__ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=__UpperCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
A__ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=__UpperCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(__UpperCamelCase ) -> Dict:
A__ = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(__UpperCamelCase , p.label_ids )}
# Data collator
A__ = DataCollatorWithPadding(__UpperCamelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
A__ = Trainer(
model=__UpperCamelCase , args=__UpperCamelCase , train_dataset=__UpperCamelCase , eval_dataset=__UpperCamelCase , compute_metrics=__UpperCamelCase , data_collator=__UpperCamelCase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
A__ = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
A__ = trainer.evaluate()
A__ = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_master():
with open(__UpperCamelCase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , __UpperCamelCase , __UpperCamelCase )
writer.write('%s = %s\n' % (key, value) )
results.update(__UpperCamelCase )
return results
def A ( __UpperCamelCase ) -> List[Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Union[str, Any] , _snake_case : List[str] , _snake_case : List[str]=7 , _snake_case : Dict=3 , _snake_case : List[str]=30 , _snake_case : Dict=4_00 , _snake_case : List[str]=True , _snake_case : int=None , _snake_case : Optional[Any]=True , _snake_case : Tuple=[0.5, 0.5, 0.5] , _snake_case : List[Any]=[0.5, 0.5, 0.5] , _snake_case : Any=True , _snake_case : Optional[int]=1 / 2_55 , _snake_case : Any=True , ):
"""simple docstring"""
A__ = size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33}
A__ = parent
A__ = batch_size
A__ = num_channels
A__ = min_resolution
A__ = max_resolution
A__ = do_resize
A__ = size
A__ = do_normalize
A__ = image_mean
A__ = image_std
A__ = do_rescale
A__ = rescale_factor
A__ = do_pad
def _a ( self : str ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _a ( self : List[str] , _snake_case : Tuple , _snake_case : str=False ):
"""simple docstring"""
if not batched:
A__ = image_inputs[0]
if isinstance(_snake_case , Image.Image ):
A__ , A__ = image.size
else:
A__ , A__ = image.shape[1], image.shape[2]
if w < h:
A__ = int(self.size['shortest_edge'] * h / w )
A__ = self.size['shortest_edge']
elif w > h:
A__ = self.size['shortest_edge']
A__ = int(self.size['shortest_edge'] * w / h )
else:
A__ = self.size['shortest_edge']
A__ = self.size['shortest_edge']
else:
A__ = []
for image in image_inputs:
A__ , A__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A__ = max(_snake_case , key=lambda _snake_case : item[0] )[0]
A__ = max(_snake_case , key=lambda _snake_case : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class __lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Dict = YolosImageProcessor if is_vision_available() else None
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = YolosImageProcessingTester(self )
@property
def _a ( self : int ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self : str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_snake_case , 'image_mean' ) )
self.assertTrue(hasattr(_snake_case , 'image_std' ) )
self.assertTrue(hasattr(_snake_case , 'do_normalize' ) )
self.assertTrue(hasattr(_snake_case , 'do_resize' ) )
self.assertTrue(hasattr(_snake_case , 'size' ) )
def _a ( self : str ):
"""simple docstring"""
A__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33} )
self.assertEqual(image_processor.do_pad , _snake_case )
A__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_snake_case )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , _snake_case )
def _a ( self : List[str] ):
"""simple docstring"""
pass
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , Image.Image )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(_snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ , A__ = self.image_processor_tester.get_expected_values(_snake_case , batched=_snake_case )
A__ = image_processing(_snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _a ( self : Tuple ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , numpify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , np.ndarray )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(_snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(_snake_case , return_tensors='pt' ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(_snake_case , batched=_snake_case )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , torchify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , torch.Tensor )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(_snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(_snake_case , return_tensors='pt' ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(_snake_case , batched=_snake_case )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _a ( self : Any ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
A__ = self.image_processing_class(do_resize=_snake_case , do_normalize=_snake_case , do_rescale=_snake_case )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , torchify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
A__ = image_processing_a.pad(_snake_case , return_tensors='pt' )
A__ = image_processing_a(_snake_case , return_tensors='pt' )
self.assertTrue(
torch.allclose(encoded_images_with_method['pixel_values'] , encoded_images['pixel_values'] , atol=1E-4 ) )
@slow
def _a ( self : List[str] ):
"""simple docstring"""
A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
A__ = json.loads(f.read() )
A__ = {'image_id': 3_97_69, 'annotations': target}
# encode them
A__ = YolosImageProcessor.from_pretrained('hustvl/yolos-small' )
A__ = image_processing(images=_snake_case , annotations=_snake_case , return_tensors='pt' )
# verify pixel values
A__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , _snake_case )
A__ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _snake_case , atol=1E-4 ) )
# verify area
A__ = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _snake_case ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _snake_case )
A__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _snake_case , atol=1E-3 ) )
# verify image_id
A__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _snake_case ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _snake_case ) )
# verify class_labels
A__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _snake_case ) )
# verify orig_size
A__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _snake_case ) )
# verify size
A__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _snake_case ) )
@slow
def _a ( self : List[str] ):
"""simple docstring"""
A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
A__ = json.loads(f.read() )
A__ = {'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
A__ = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
A__ = YolosImageProcessor(format='coco_panoptic' )
A__ = image_processing(images=_snake_case , annotations=_snake_case , masks_path=_snake_case , return_tensors='pt' )
# verify pixel values
A__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , _snake_case )
A__ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _snake_case , atol=1E-4 ) )
# verify area
A__ = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _snake_case ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _snake_case )
A__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _snake_case , atol=1E-3 ) )
# verify image_id
A__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _snake_case ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _snake_case ) )
# verify class_labels
A__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _snake_case ) )
# verify masks
A__ = 82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _snake_case )
# verify orig_size
A__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _snake_case ) )
# verify size
A__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _snake_case ) )
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
def A ( __UpperCamelCase ) -> Union[str, Any]:
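    # repeatedly move the minimum to `start` and the maximum to `end`; whatever
    # remains (at most one element) ends up in the middle of the result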
A__ , A__ = [], []
while len(__UpperCamelCase ) > 1:
A__ , A__ = min(__UpperCamelCase ), max(__UpperCamelCase )
start.append(__UpperCamelCase )
end.append(__UpperCamelCase )
collection.remove(__UpperCamelCase )
collection.remove(__UpperCamelCase )
end.reverse()
return start + collection + end
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = input('''Enter numbers separated by a comma:\n''').strip()
SCREAMING_SNAKE_CASE__ = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
SCREAMING_SNAKE_CASE__ = f'https://www.google.com/search?q={query}&num=100'
SCREAMING_SNAKE_CASE__ = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
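    # the organic-result link lives in a "yuRUbf" div on the full results page; the
    # lightweight fallback page wraps it in "kCrYT" with a /url?q= redirect instead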
try:
SCREAMING_SNAKE_CASE__ = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
SCREAMING_SNAKE_CASE__ = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Any = IFInpaintingPipeline
A__ : Dict = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
A__ : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A__ : Dict = PipelineTesterMixin.required_optional_params - {"latents"}
def _a ( self : Any ):
"""simple docstring"""
return self._get_dummy_components()
def _a ( self : Optional[int] , _snake_case : Any , _snake_case : str=0 ):
"""simple docstring"""
if str(_snake_case ).startswith('mps' ):
A__ = torch.manual_seed(_snake_case )
else:
A__ = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
A__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case )
A__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case )
A__ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _a ( self : Dict ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def _a ( self : int ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def _a ( self : Optional[int] ):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def _a ( self : List[str] ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def _a ( self : Dict ):
"""simple docstring"""
self._test_save_load_local()
def _a ( self : Optional[int] ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ['''XLNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ['''XLNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
SCREAMING_SNAKE_CASE__ = get_logger(__name__)
SCREAMING_SNAKE_CASE__ = r'''
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary token when not
            using beam search, or log softmax for each vocabulary token when using beam search.
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
'''
class __lowerCAmelCase :
"""simple docstring"""
@add_start_docstrings(_snake_case )
def __call__( self : Optional[int] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray ):
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class __lowerCAmelCase :
"""simple docstring"""
@add_start_docstrings(_snake_case )
def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray ):
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
@add_start_docstrings(_snake_case )
def __call__( self : Any , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int , **_snake_case : Optional[int] ):
"""simple docstring"""
for processor in self:
A__ = inspect.signature(processor.__call__ ).parameters
if len(_snake_case ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
F'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
F'''{processor.__class__} are passed to the logits processor.''' )
A__ = processor(_snake_case , _snake_case , _snake_case , **_snake_case )
else:
A__ = processor(_snake_case , _snake_case , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Any , _snake_case : float ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or not (temperature > 0):
raise ValueError(F'''`temperature` has to be a strictly positive float, but is {temperature}''' )
A__ = temperature
def __call__( self : str , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = scores / self.temperature
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Optional[Any] , _snake_case : float , _snake_case : float = -float('Inf' ) , _snake_case : int = 1 ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or (top_p < 0 or top_p > 1.0):
raise ValueError(F'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
if not isinstance(_snake_case , _snake_case ) or (min_tokens_to_keep < 1):
raise ValueError(F'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
A__ = top_p
A__ = filter_value
A__ = min_tokens_to_keep
def __call__( self : str , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ , A__ = lax.top_k(_snake_case , scores.shape[-1] )
A__ = jnp.full_like(_snake_case , self.filter_value )
A__ = jax.nn.softmax(_snake_case , axis=-1 ).cumsum(axis=-1 )
A__ = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
A__ = jnp.roll(_snake_case , 1 )
score_mask |= score_mask.at[:, 0].set(_snake_case )
# min tokens to keep
A__ = score_mask.at[:, : self.min_tokens_to_keep].set(_snake_case )
A__ = jnp.where(_snake_case , _snake_case , _snake_case )
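        # the surviving scores are still in descending (top-k) order; sorting the
        # (token index, score) pairs by index restores the original vocabulary order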
A__ = jax.lax.sort_key_val(_snake_case , _snake_case )[-1]
return next_scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , _snake_case : int , _snake_case : float = -float('Inf' ) , _snake_case : int = 1 ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or top_k <= 0:
raise ValueError(F'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
A__ = max(_snake_case , _snake_case )
A__ = filter_value
def __call__( self : Optional[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ , A__ = scores.shape
A__ = jnp.full(batch_size * vocab_size , self.filter_value )
A__ = min(self.top_k , scores.shape[-1] ) # Safety check
A__ , A__ = lax.top_k(_snake_case , _snake_case )
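        # offset each row's top-k indices by batch_idx * vocab_size so they address
        # the flattened (batch_size * vocab_size) score buffer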
A__ = jnp.broadcast_to((jnp.arange(_snake_case ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
A__ = topk_scores.flatten()
A__ = topk_indices.flatten() + shift
A__ = next_scores_flat.at[topk_indices_flat].set(_snake_case )
A__ = next_scores_flat.reshape(_snake_case , _snake_case )
return next_scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Any , _snake_case : int ):
"""simple docstring"""
A__ = bos_token_id
def __call__( self : Optional[int] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = jnp.full(scores.shape , -float('inf' ) )
A__ = 1 - jnp.bool_(cur_len - 1 )
A__ = jnp.where(_snake_case , new_scores.at[:, self.bos_token_id].set(0 ) , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Any , _snake_case : int , _snake_case : int ):
"""simple docstring"""
A__ = max_length
A__ = eos_token_id
def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = jnp.full(scores.shape , -float('inf' ) )
A__ = 1 - jnp.bool_(cur_len - self.max_length + 1 )
A__ = jnp.where(_snake_case , new_scores.at[:, self.eos_token_id].set(0 ) , _snake_case )
return scores
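# Hedged note on the masking trick above: `1 - jnp.bool_(cur_len - max_length + 1)`
# is 1 only when cur_len == max_length - 1 (the last position), since jnp.bool_
# of any nonzero offset is True. A quick check with toy values:
import jax.numpy as jnp

max_length = 5
gates = [int(1 - jnp.bool_(cur_len - max_length + 1)) for cur_len in range(6)]
# gates == [0, 0, 0, 0, 1, 0]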
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Dict , _snake_case : int , _snake_case : int ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or min_length < 0:
raise ValueError(F'''`min_length` has to be a positive integer, but is {min_length}''' )
if not isinstance(_snake_case , _snake_case ) or eos_token_id < 0:
raise ValueError(F'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
A__ = min_length
A__ = eos_token_id
def __call__( self : int , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
A__ = jnp.where(_snake_case , scores.at[:, self.eos_token_id].set(-float('inf' ) ) , _snake_case )
return scores
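# Hedged check of the clip-based gate above: `1 - jnp.clip(cur_len - min_length, 0, 1)`
# stays 1 up to and including cur_len == min_length, then drops to 0, so EOS is
# only suppressed early in generation. Toy values:
import jax.numpy as jnp

min_length = 3
gates = [int(1 - jnp.clip(cur_len - min_length, 0, 1)) for cur_len in range(6)]
# gates == [1, 1, 1, 1, 0, 0]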
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : int , _snake_case : Tuple , _snake_case : Union[str, Any] ):
"""simple docstring"""
A__ = list(_snake_case )
A__ = begin_index
def __call__( self : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : str , _snake_case : int ):
"""simple docstring"""
A__ = 1 - jnp.bool_(cur_len - self.begin_index )
A__ = jnp.where(_snake_case , scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : int , _snake_case : list ):
"""simple docstring"""
A__ = list(_snake_case )
def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = scores.at[..., self.suppress_tokens].set(-float('inf' ) )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : List[str] , _snake_case : Optional[Any] ):
"""simple docstring"""
A__ = dict(_snake_case )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
A__ = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
A__ = force_token_array.at[index].set(_snake_case )
A__ = jnp.intaa(_snake_case )
def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
def _force_token(_snake_case : Dict ):
A__ = scores.shape[0]
A__ = self.force_token_array[generation_idx]
A__ = jnp.ones_like(_snake_case , dtype=scores.dtype ) * -float('inf' )
A__ = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
A__ = lax.dynamic_update_slice(_snake_case , _snake_case , (0, current_token) )
return new_scores
A__ = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(_snake_case ) , lambda: scores , ) , )
return scores
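# Hedged illustration of the force_token_array construction above: a sparse
# {position: token} map becomes a dense int array in which -1 marks positions
# where the model's own scores are used. Map values invented for demonstration.
import jax.numpy as jnp

force_token_map = {0: 50258, 2: 50359}
arr = jnp.ones(max(force_token_map.keys()) + 1, dtype=jnp.int32) * -1
for index, token in force_token_map.items():
    arr = arr.at[index].set(token)
# arr -> [50258, -1, 50359]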
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : List[Any] ):
"""simple docstring"""
A__ = generate_config.eos_token_id
A__ = generate_config.no_timestamps_token_id
A__ = generate_config.no_timestamps_token_id + 1
A__ = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(_snake_case , 'max_initial_timestamp_index' ):
A__ = generate_config.max_initial_timestamp_index
else:
A__ = model_config.vocab_size
if self.max_initial_timestamp_index is None:
A__ = model_config.vocab_size
def __call__( self : Tuple , _snake_case : List[Any] , _snake_case : Dict , _snake_case : Dict ):
"""simple docstring"""
A__ = scores.at[:, self.no_timestamps_token_id].set(-float('inf' ) )
def handle_pairs(_snake_case : Dict , _snake_case : str ):
A__ = jnp.where((cur_len - self.begin_index) >= 1 , _snake_case , _snake_case )
A__ = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , _snake_case , )
A__ = jnp.where((cur_len - self.begin_index) < 2 , _snake_case , _snake_case )
A__ = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , _snake_case , _snake_case , )
return jnp.where(
_snake_case , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('inf' ) ) , scores_k.at[: self.eos_token_id].set(-float('inf' ) ) , ) , _snake_case , )
A__ = jax.vmap(_snake_case )(_snake_case , _snake_case )
A__ = jnp.where(cur_len == self.begin_index , _snake_case , _snake_case )
A__ = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , _snake_case , )
A__ = self.timestamp_begin + self.max_initial_timestamp_index
A__ = jnp.where(
_snake_case , scores.at[:, last_allowed + 1 :].set(-float('inf' ) ) , _snake_case , )
# if sum of probability over timestamps is above any other token, sample timestamp
A__ = jax.nn.log_softmax(_snake_case , axis=-1 )
def handle_cumulative_probs(_snake_case : List[Any] , _snake_case : Union[str, Any] ):
A__ = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
A__ = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('inf' ) ) , _snake_case , )
A__ = jax.vmap(_snake_case )(_snake_case , _snake_case )
return scores
| 52
| 1
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
logging.set_verbosity_info()
def A ( __UpperCamelCase , __UpperCamelCase ) -> int:
if "xprophetnet" in prophetnet_checkpoint_path:
A__ = XLMProphetNetForConditionalGenerationOld.from_pretrained(__UpperCamelCase )
A__ , A__ = XLMProphetNetForConditionalGeneration.from_pretrained(
__UpperCamelCase , output_loading_info=__UpperCamelCase )
else:
A__ = ProphetNetForConditionalGenerationOld.from_pretrained(__UpperCamelCase )
A__ , A__ = ProphetNetForConditionalGeneration.from_pretrained(
__UpperCamelCase , output_loading_info=__UpperCamelCase )
A__ = ['key_proj', 'value_proj', 'query_proj']
A__ = {
'self_attn': 'ngram_self_attn',
'cross_attn': 'encoder_attn',
'cross_attn_layer_norm': 'encoder_attn_layer_norm',
'feed_forward_layer_norm': 'final_layer_norm',
'feed_forward': '',
'intermediate': 'fc1',
'output': 'fc2',
'key_proj': 'k_proj',
'query_proj': 'q_proj',
'value_proj': 'v_proj',
'word_embeddings': 'embed_tokens',
'embeddings_layer_norm': 'emb_layer_norm',
'relative_pos_embeddings': 'relative_linear',
'ngram_embeddings': 'ngram_input_embed',
'position_embeddings': 'embed_positions',
}
for key in loading_info["missing_keys"]:
A__ = key.split('.' )
if attributes[0] == "lm_head":
A__ = prophet
A__ = prophet_old
else:
A__ = prophet.prophetnet
A__ = prophet_old.model
A__ = False
for attribute in attributes:
if attribute in mapping:
A__ = mapping[attribute]
if not hasattr(__UpperCamelCase , __UpperCamelCase ) and len(__UpperCamelCase ) > 0:
A__ = attribute
elif hasattr(__UpperCamelCase , __UpperCamelCase ):
A__ = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
A__ = old_model.weight
logger.info(f'''{attribute} is initialized.''' )
A__ = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
A__ = old_model.bias
logger.info(f'''{attribute} is initialized''' )
A__ = True
break
elif attribute in special_keys and hasattr(__UpperCamelCase , 'in_proj_weight' ):
A__ = old_model.in_proj_weight.shape[0] // 3
A__ = getattr(__UpperCamelCase , __UpperCamelCase )
assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
A__ = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
A__ = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
A__ = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
A__ = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
A__ = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
A__ = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
A__ = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
A__ = nn.Parameter(old_model.embed_positions.weight[:512, :] )
A__ = True
break
if attribute.isdigit():
A__ = model[int(__UpperCamelCase )]
A__ = old_model[int(__UpperCamelCase )]
else:
A__ = getattr(__UpperCamelCase , __UpperCamelCase )
if old_attribute == "":
A__ = old_model
else:
if not hasattr(__UpperCamelCase , __UpperCamelCase ):
raise ValueError(f'''{old_model} does not have {old_attribute}''' )
A__ = getattr(__UpperCamelCase , __UpperCamelCase )
if not is_key_init:
raise ValueError(f'''{key} was not correctly initialized!''' )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
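# Hedged usage sketch (the file name and paths below are placeholders, not from
# the source): the conversion script would be invoked roughly as
#   python convert_prophetnet_checkpoint.py \
#       --prophetnet_checkpoint_path /path/to/old_checkpoint \
#       --pytorch_dump_folder_path /path/to/output_dir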
| 52
|
import argparse
import struct
import unittest
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[str] , _snake_case : bytes ):
"""simple docstring"""
A__ = data
# Initialize hash values
A__ = [
0x6A09E667,
0xBB67AE85,
0x3C6EF372,
0xA54FF53A,
0x510E527F,
0x9B05688C,
0x1F83D9AB,
0x5BE0CD19,
]
# Initialize round constants
A__ = [
0x428A2F98,
0x71374491,
0xB5C0FBCF,
0xE9B5DBA5,
0x3956C25B,
0x59F111F1,
0x923F82A4,
0xAB1C5ED5,
0xD807AA98,
0x12835B01,
0x243185BE,
0x550C7DC3,
0x72BE5D74,
0x80DEB1FE,
0x9BDC06A7,
0xC19BF174,
0xE49B69C1,
0xEFBE4786,
0x0FC19DC6,
0x240CA1CC,
0x2DE92C6F,
0x4A7484AA,
0x5CB0A9DC,
0x76F988DA,
0x983E5152,
0xA831C66D,
0xB00327C8,
0xBF597FC7,
0xC6E00BF3,
0xD5A79147,
0x06CA6351,
0x14292967,
0x27B70A85,
0x2E1B2138,
0x4D2C6DFC,
0x53380D13,
0x650A7354,
0x766A0ABB,
0x81C2C92E,
0x92722C85,
0xA2BFE8A1,
0xA81A664B,
0xC24B8B70,
0xC76C51A3,
0xD192E819,
0xD6990624,
0xF40E3585,
0x106AA070,
0x19A4C116,
0x1E376C08,
0x2748774C,
0x34B0BCB5,
0x391C0CB3,
0x4ED8AA4A,
0x5B9CCA4F,
0x682E6FF3,
0x748F82EE,
0x78A5636F,
0x84C87814,
0x8CC70208,
0x90BEFFFA,
0xA4506CEB,
0xBEF9A3F7,
0xC67178F2,
]
A__ = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def _a ( _snake_case : bytes ):
"""simple docstring"""
A__ = B'\x80' + (B'\x00' * (63 - (len(_snake_case ) + 8) % 64))
A__ = struct.pack('>Q' , (len(_snake_case ) * 8) )
return data + padding + big_endian_integer
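# Hedged check of the padding rule above: for a 3-byte message, the pad is
# b"\x80" plus 52 zero bytes ((3 + 8) % 64 == 11, and 63 - 11 == 52), giving
# 3 + 53 + 8 == 64 bytes in total, so the preprocessed length is always a
# multiple of 64 bytes (512 bits), as SHA-256 requires.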
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
A__ = list(struct.unpack('>16L' , _snake_case ) )
# add 48 0-ed integers
words += [0] * 48
A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
A__ = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
A__ = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
A__ = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x100000000
# Compression
A__ = self.ror(_snake_case , 6 ) ^ self.ror(_snake_case , 11 ) ^ self.ror(_snake_case , 25 )
A__ = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
A__ = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x100000000
A__ = self.ror(_snake_case , 2 ) ^ self.ror(_snake_case , 13 ) ^ self.ror(_snake_case , 22 )
A__ = (a & b) ^ (a & c) ^ (b & c)
A__ = (sa + maj) % 0x100000000
A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ = (
g,
f,
e,
((d + tempa) % 0x100000000),
c,
b,
a,
((tempa + tempa) % 0x100000000),
)
A__ = [a, b, c, d, e, f, g, h]
# Modify final values
A__ = [
((element + mutated_hash_values[index]) % 0x100000000)
for index, element in enumerate(self.hashes )
]
A__ = ''.join([hex(_snake_case )[2:].zfill(8 ) for value in self.hashes] )
def _a ( self : Dict , _snake_case : int , _snake_case : int ):
"""simple docstring"""
return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
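# Hedged example of the 32-bit right rotation above, with an invented value:
# rotating 0x12345678 right by 8 bits moves the low byte to the top.
value, rotations = 0x12345678, 8
rotated = 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
assert rotated == 0x78123456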
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : str ):
"""simple docstring"""
import hashlib
A__ = bytes('Test String' , 'utf-8' )
self.assertEqual(SHAaaa(_snake_case ).hash , hashlib.shaaaa(_snake_case ).hexdigest() )
def A ( ) -> None:
import doctest
doctest.testmod()
A__ = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
A__ = parser.parse_args()
A__ = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
A__ = f.read()
else:
A__ = bytes(__UpperCamelCase , 'utf-8' )
print(SHAaaa(__UpperCamelCase ).hash )
if __name__ == "__main__":
main()
| 52
| 1
|
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger('''transformers.models.encodec''')
SCREAMING_SNAKE_CASE__ = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
SCREAMING_SNAKE_CASE__ = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
SCREAMING_SNAKE_CASE__ = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
SCREAMING_SNAKE_CASE__ = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
SCREAMING_SNAKE_CASE__ = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
SCREAMING_SNAKE_CASE__ = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
SCREAMING_SNAKE_CASE__ = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Any:
for attribute in key.split('.' ):
A__ = getattr(__UpperCamelCase , __UpperCamelCase )
if weight_type is not None:
A__ = getattr(__UpperCamelCase , __UpperCamelCase ).shape
else:
A__ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
A__ = value
elif weight_type == "weight_g":
A__ = value
elif weight_type == "weight_v":
A__ = value
elif weight_type == "bias":
A__ = value
elif weight_type == "running_mean":
A__ = value
elif weight_type == "running_var":
A__ = value
elif weight_type == "num_batches_tracked":
A__ = value
elif weight_type == "weight_ih_l0":
A__ = value
elif weight_type == "weight_hh_l0":
A__ = value
elif weight_type == "bias_ih_l0":
A__ = value
elif weight_type == "bias_hh_l0":
A__ = value
elif weight_type == "weight_ih_l1":
A__ = value
elif weight_type == "weight_hh_l1":
A__ = value
elif weight_type == "bias_ih_l1":
A__ = value
elif weight_type == "bias_hh_l1":
A__ = value
else:
A__ = value
logger.info(f'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' )
def A ( __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]:
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
A__ , A__ = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
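# Hedged sketch of the pattern matching above, with invented layer names:
# a trailing ".*" matches by prefix, an embedded ".*." matches prefix+suffix,
# and a plain key matches by substring.
ignore_patterns = ["decoder.*", "encoder.*.lstm", "quantizer"]
# "decoder.model.0.conv"        -> True  (prefix "decoder.")
# "encoder.model.13.lstm"       -> True  (prefix "encoder", suffix "lstm")
# "quantizer.vq.layers.0.embed" -> True  (substring "quantizer")
# "encoder.model.0.conv"        -> False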
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Any:
A__ = []
if model_name in ("encodec_24khz", "encodec_32khz"):
A__ = MAPPING_24K
elif model_name == "encodec_48khz":
A__ = MAPPING_48K
else:
raise ValueError(f'''Unsupported model: {model_name}''' )
for name, value in orig_dict.items():
if should_ignore(__UpperCamelCase , __UpperCamelCase ):
logger.info(f'''{name} was ignored''' )
continue
A__ = False
for key, mapped_key in MAPPING.items():
if "*" in key:
A__ , A__ = key.split('.*.' )
if prefix in name and suffix in name:
A__ = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('embed' ) and name.endswith('embed_avg' ):
continue
A__ = True
if "*" in mapped_key:
A__ = name.split(__UpperCamelCase )[0].split('.' )[-2]
A__ = mapped_key.replace('*' , __UpperCamelCase )
if "weight_g" in name:
A__ = 'weight_g'
elif "weight_v" in name:
A__ = 'weight_v'
elif "weight_ih_l0" in name:
A__ = 'weight_ih_l0'
elif "weight_hh_l0" in name:
A__ = 'weight_hh_l0'
elif "bias_ih_l0" in name:
A__ = 'bias_ih_l0'
elif "bias_hh_l0" in name:
A__ = 'bias_hh_l0'
elif "weight_ih_l1" in name:
A__ = 'weight_ih_l1'
elif "weight_hh_l1" in name:
A__ = 'weight_hh_l1'
elif "bias_ih_l1" in name:
A__ = 'bias_ih_l1'
elif "bias_hh_l1" in name:
A__ = 'bias_hh_l1'
elif "bias" in name:
A__ = 'bias'
elif "weight" in name:
A__ = 'weight'
elif "running_mean" in name:
A__ = 'running_mean'
elif "running_var" in name:
A__ = 'running_var'
elif "num_batches_tracked" in name:
A__ = 'num_batches_tracked'
else:
A__ = None
set_recursively(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
continue
if not is_used:
unused_weights.append(__UpperCamelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , ) -> int:
if config_path is not None:
A__ = EncodecConfig.from_pretrained(__UpperCamelCase )
else:
A__ = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
A__ = [8, 5, 4, 4]
A__ = [2.2]
A__ = 64
A__ = 32_000
A__ = 2_048
A__ = False
A__ = False
A__ = False
elif model_name == "encodec_48khz":
A__ = [8, 5, 4, 2]
A__ = [3.0, 6.0, 12.0, 24.0]
A__ = 48_000
A__ = 2
A__ = False
A__ = 'time_group_norm'
A__ = True
A__ = 1.0
A__ = 0.01
else:
raise ValueError(f'''Unknown model name: {model_name}''' )
A__ = EncodecModel(__UpperCamelCase )
A__ = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(__UpperCamelCase )
A__ = torch.load(__UpperCamelCase )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
A__ = original_checkpoint['best_state']
recursively_load_weights(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
if repo_id:
print('Pushing to the hub...' )
feature_extractor.push_to_hub(__UpperCamelCase )
model.push_to_hub(__UpperCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 52
|
import math
import random
def A ( __UpperCamelCase , __UpperCamelCase = False ) -> float:
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
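# Hedged numeric check of the sigmoid above: at value 0 the sigmoid is 0.5, and
# the derivative form expects the *sigmoid output* as input, giving 0.5 * 0.5.
import math

s = 1 / (1 + math.exp(-0.0))   # 0.5
ds = s * (1 - s)               # 0.25, the derivative at that output
assert (s, ds) == (0.5, 0.25)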
# Initial Value
SCREAMING_SNAKE_CASE__ = 0.02
def A ( __UpperCamelCase , __UpperCamelCase ) -> float:
A__ = float(2 * (random.randint(1 , 100 )) - 1 )
for _ in range(__UpperCamelCase ):
# Forward propagation
A__ = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
A__ = (expected / 100) - layer_a
# Error delta
A__ = layer_1_error * sigmoid_function(__UpperCamelCase , __UpperCamelCase )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE__ = int(input('''Expected value: '''))
SCREAMING_SNAKE_CASE__ = int(input('''Number of propagations: '''))
print(forward_propagation(expected, number_propagations))
| 52
| 1
|
from __future__ import annotations
import numpy as np
def A ( __UpperCamelCase ) -> tuple[np.ndarray, np.ndarray]:
A__ , A__ = np.shape(__UpperCamelCase )
if rows != columns:
A__ = (
'\'table\' has to be of square shaped array but got a '
f'''{rows}x{columns} array:\n{table}'''
)
raise ValueError(__UpperCamelCase )
A__ = np.zeros((rows, columns) )
A__ = np.zeros((rows, columns) )
for i in range(__UpperCamelCase ):
for j in range(__UpperCamelCase ):
A__ = sum(lower[i][k] * upper[k][j] for k in range(__UpperCamelCase ) )
if upper[j][j] == 0:
raise ArithmeticError('No LU decomposition exists' )
A__ = (table[i][j] - total) / upper[j][j]
A__ = 1
for j in range(__UpperCamelCase , __UpperCamelCase ):
A__ = sum(lower[i][k] * upper[k][j] for k in range(__UpperCamelCase ) )
A__ = table[i][j] - total
return lower, upper
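# Hedged worked example of the Doolittle decomposition above (invented matrix):
# for A = [[4, 3], [6, 3]] the factors are L = [[1, 0], [1.5, 1]] and
# U = [[4, 3], [0, -1.5]], since 6/4 = 1.5 and 3 - 1.5 * 3 = -1.5.
import numpy as np

a = np.array([[4.0, 3.0], [6.0, 3.0]])
lower = np.array([[1.0, 0.0], [1.5, 1.0]])
upper = np.array([[4.0, 3.0], [0.0, -1.5]])
assert np.allclose(lower @ upper, a)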
if __name__ == "__main__":
import doctest
doctest.testmod()
| 52
|
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self : int ):
"""simple docstring"""
A__ = FlaxMTaForConditionalGeneration.from_pretrained('google/mt5-small' )
A__ = AutoTokenizer.from_pretrained('google/mt5-small' )
A__ = tokenizer('Hello there' , return_tensors='np' ).input_ids
A__ = tokenizer('Hi I am' , return_tensors='np' ).input_ids
A__ = shift_tokens_right(_snake_case , model.config.pad_token_id , model.config.decoder_start_token_id )
A__ = model(_snake_case , decoder_input_ids=_snake_case ).logits
A__ = optax.softmax_cross_entropy(_snake_case , onehot(_snake_case , logits.shape[-1] ) ).mean()
A__ = -(labels.shape[-1] * loss.item())
A__ = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 52
| 1
|
def A ( __UpperCamelCase , __UpperCamelCase ) -> List[str]:
A__ = [0 for i in range(r + 1 )]
# nc0 = 1
A__ = 1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
A__ = min(__UpperCamelCase , __UpperCamelCase )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=1_0, r=5))
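# Hedged check of the Pascal-row recurrence above: C(10, 5) should be 252,
# matching math.comb from the standard library (Python 3.8+).
import math
assert math.comb(10, 5) == 252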
| 52
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : List[str] = "roberta"
def __init__( self : List[str] , _snake_case : Union[str, Any]=5_02_65 , _snake_case : List[Any]=7_68 , _snake_case : List[str]=12 , _snake_case : List[str]=12 , _snake_case : Any=30_72 , _snake_case : Union[str, Any]="gelu" , _snake_case : int=0.1 , _snake_case : Union[str, Any]=0.1 , _snake_case : Tuple=5_12 , _snake_case : Union[str, Any]=2 , _snake_case : Any=0.02 , _snake_case : Any=1E-12 , _snake_case : List[Any]=1 , _snake_case : int=0 , _snake_case : Any=2 , _snake_case : Optional[Any]="absolute" , _snake_case : int=True , _snake_case : Any=None , **_snake_case : Any , ):
"""simple docstring"""
super().__init__(pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case )
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = initializer_range
A__ = layer_norm_eps
A__ = position_embedding_type
A__ = use_cache
A__ = classifier_dropout
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
@property
def _a ( self : Dict ):
"""simple docstring"""
if self.task == "multiple-choice":
A__ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
A__ = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 52
| 1
|
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
SCREAMING_SNAKE_CASE__ = '''bart'''
SCREAMING_SNAKE_CASE__ = True
@st.cache(allow_output_mutation=__UpperCamelCase )
def A ( ) -> List[str]:
if LOAD_DENSE_INDEX:
A__ = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' )
A__ = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' )
A__ = qar_model.eval()
else:
A__ , A__ = (None, None)
if MODEL_TYPE == "bart":
A__ = AutoTokenizer.from_pretrained('yjernite/bart_eli5' )
A__ = AutoModelForSeqaSeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' )
A__ = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' )
sas_model.load_state_dict(save_dict['model'] )
A__ = sas_model.eval()
else:
A__ , A__ = make_qa_sas_model(
model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=__UpperCamelCase )
def A ( ) -> Any:
if LOAD_DENSE_INDEX:
A__ = faiss.StandardGpuResources()
A__ = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0' )['train']
A__ = np.memmap(
'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wikiaab_passages.num_rows, 128) , )
A__ = faiss.IndexFlatIP(128 )
A__ = faiss.index_cpu_to_gpu(__UpperCamelCase , 1 , __UpperCamelCase )
wikiaab_gpu_index_flat.add(__UpperCamelCase ) # TODO fix for larger GPU
else:
A__ , A__ = (None, None)
A__ = Elasticsearch([{'host': 'localhost', 'port': '9200'}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=__UpperCamelCase )
def A ( ) -> List[str]:
A__ = datasets.load_dataset('eli5' , name='LFQA_reddit' )
A__ = elia['train_eli5']
A__ = np.memmap(
'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(elia_train.num_rows, 128) )
A__ = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(__UpperCamelCase )
return (elia_train, eli5_train_q_index)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = load_indexes()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = load_models()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = load_train_data()
def A ( __UpperCamelCase , __UpperCamelCase=10 ) -> int:
A__ = embed_questions_for_retrieval([question] , __UpperCamelCase , __UpperCamelCase )
A__ , A__ = eli5_train_q_index.search(__UpperCamelCase , __UpperCamelCase )
A__ = [elia_train[int(__UpperCamelCase )] for i in I[0]]
return nn_examples
def A ( __UpperCamelCase , __UpperCamelCase="wiki40b" , __UpperCamelCase="dense" , __UpperCamelCase=10 ) -> int:
if source == "none":
A__ , A__ = (' <P> '.join(['' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
A__ , A__ = query_qa_dense_index(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
A__ , A__ = query_es_index(
__UpperCamelCase , __UpperCamelCase , index_name='english_wiki40b_snippets_100w' , n_results=__UpperCamelCase , )
A__ = [
(res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
]
A__ = 'question: {} context: {}'.format(__UpperCamelCase , __UpperCamelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda __UpperCamelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __UpperCamelCase : None),
} )
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=64 , __UpperCamelCase=256 , __UpperCamelCase=False , __UpperCamelCase=2 , __UpperCamelCase=0.95 , __UpperCamelCase=0.8 ) -> Tuple:
with torch.no_grad():
A__ = qa_sas_generate(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , num_answers=1 , num_beams=__UpperCamelCase , min_len=__UpperCamelCase , max_len=__UpperCamelCase , do_sample=__UpperCamelCase , temp=__UpperCamelCase , top_p=__UpperCamelCase , top_k=__UpperCamelCase , max_input_length=1_024 , device='cuda:0' , )[0]
return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
SCREAMING_SNAKE_CASE__ = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
SCREAMING_SNAKE_CASE__ = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
SCREAMING_SNAKE_CASE__ = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
SCREAMING_SNAKE_CASE__ = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
SCREAMING_SNAKE_CASE__ = st.sidebar.checkbox('''Demo options''')
if demo_options:
SCREAMING_SNAKE_CASE__ = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
SCREAMING_SNAKE_CASE__ = action_list.index(action_st)
SCREAMING_SNAKE_CASE__ = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
SCREAMING_SNAKE_CASE__ = show_type == '''Show full text of passages'''
else:
SCREAMING_SNAKE_CASE__ = 3
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
SCREAMING_SNAKE_CASE__ = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
SCREAMING_SNAKE_CASE__ = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
SCREAMING_SNAKE_CASE__ = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
SCREAMING_SNAKE_CASE__ = '''wiki40b'''
SCREAMING_SNAKE_CASE__ = '''dense'''
SCREAMING_SNAKE_CASE__ = '''beam'''
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = 6_4
SCREAMING_SNAKE_CASE__ = 2_5_6
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = st.sidebar.checkbox('''Generation options''')
if generate_options:
SCREAMING_SNAKE_CASE__ = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
SCREAMING_SNAKE_CASE__ = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
SCREAMING_SNAKE_CASE__ = st.sidebar.slider(
'''Minimum generation length''', min_value=8, max_value=2_5_6, value=6_4, step=8, format=None, key=None
)
SCREAMING_SNAKE_CASE__ = st.sidebar.slider(
'''Maximum generation length''', min_value=6_4, max_value=5_1_2, value=2_5_6, step=1_6, format=None, key=None
)
if sampled == "beam":
SCREAMING_SNAKE_CASE__ = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
SCREAMING_SNAKE_CASE__ = st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
SCREAMING_SNAKE_CASE__ = st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
SCREAMING_SNAKE_CASE__ = None
# start main text
SCREAMING_SNAKE_CASE__ = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
SCREAMING_SNAKE_CASE__ = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
SCREAMING_SNAKE_CASE__ = st.text_input('''Enter your question here:''', '''''')
else:
SCREAMING_SNAKE_CASE__ = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = make_support(question, source=wiki_source, method='''dense''', n_results=1_0)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = make_support(question, source=wiki_source, method='''sparse''', n_results=1_0)
SCREAMING_SNAKE_CASE__ = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
SCREAMING_SNAKE_CASE__ = support_list[:1_0]
SCREAMING_SNAKE_CASE__ = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = make_support(question, source=wiki_source, method=index_type, n_results=1_0)
if action in [0, 3]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
SCREAMING_SNAKE_CASE__ = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
SCREAMING_SNAKE_CASE__ = res[1].strip()
if sec_titles == "":
SCREAMING_SNAKE_CASE__ = '''[{}]({})'''.format(res[0], wiki_url)
else:
SCREAMING_SNAKE_CASE__ = sec_titles.split(''' & ''')
SCREAMING_SNAKE_CASE__ = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
SCREAMING_SNAKE_CASE__ = find_nearest_training(question)
SCREAMING_SNAKE_CASE__ = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
SCREAMING_SNAKE_CASE__ = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
SCREAMING_SNAKE_CASE__ = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 52
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : int = LongformerTokenizer
A__ : Optional[int] = True
A__ : Any = LongformerTokenizerFast
A__ : Dict = True
def _a ( self : int ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A__ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
A__ = dict(zip(_snake_case , range(len(_snake_case ) ) ) )
A__ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
A__ = {'unk_token': '<unk>'}
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_snake_case ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_snake_case ) )
def _a ( self : int , **_snake_case : Union[str, Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_snake_case )
def _a ( self : Optional[int] , **_snake_case : List[Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_snake_case )
def _a ( self : Any , _snake_case : Optional[Any] ):
"""simple docstring"""
A__ = 'lower newer'
A__ = 'lower newer'
return input_text, output_text
def _a ( self : Any ):
"""simple docstring"""
A__ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
A__ = 'lower newer'
A__ = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
A__ = tokenizer.tokenize(_snake_case ) # , add_prefix_space=True)
self.assertListEqual(_snake_case , _snake_case )
A__ = tokens + [tokenizer.unk_token]
A__ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , _snake_case )
def _a ( self : List[str] ):
"""simple docstring"""
A__ = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=_snake_case ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=_snake_case ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
A__ = tokenizer.encode('sequence builders' , add_special_tokens=_snake_case )
A__ = tokenizer.encode('multi-sequence build' , add_special_tokens=_snake_case )
A__ = tokenizer.encode(
'sequence builders' , add_special_tokens=_snake_case , add_prefix_space=_snake_case )
A__ = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=_snake_case , add_prefix_space=_snake_case )
A__ = tokenizer.build_inputs_with_special_tokens(_snake_case )
A__ = tokenizer.build_inputs_with_special_tokens(_snake_case , _snake_case )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _a ( self : List[str] ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = 'Encode this sequence.'
A__ = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
A__ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case , add_prefix_space=_snake_case )
A__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(_snake_case , _snake_case )
A__ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case , add_prefix_space=_snake_case )
A__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(_snake_case , _snake_case )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
A__ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
A__ = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(_snake_case , _snake_case )
# Testing spaces after special tokens
A__ = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case )} ) # mask token has a left space
A__ = tokenizer.convert_tokens_to_ids(_snake_case )
A__ = 'Encode <mask> sequence'
A__ = 'Encode <mask>sequence'
A__ = tokenizer.encode(_snake_case )
A__ = encoded.index(_snake_case )
A__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(_snake_case , _snake_case )
A__ = tokenizer.encode(_snake_case )
A__ = encoded.index(_snake_case )
A__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(_snake_case , _snake_case )
def _a ( self : Dict ):
"""simple docstring"""
pass
def _a ( self : Union[str, Any] ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A__ = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case )
A__ = self.tokenizer_class.from_pretrained(_snake_case , **_snake_case )
A__ = 'A, <mask> AllenNLP sentence.'
A__ = tokenizer_r.encode_plus(_snake_case , add_special_tokens=_snake_case , return_token_type_ids=_snake_case )
A__ = tokenizer_p.encode_plus(_snake_case , add_special_tokens=_snake_case , return_token_type_ids=_snake_case )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
A__ = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
A__ = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
_snake_case , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
_snake_case , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def _a ( self : List[Any] ):
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
A__ = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
A__ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , _snake_case )
self.assertEqual(post_processor_state['add_prefix_space'] , _snake_case )
self.assertEqual(post_processor_state['trim_offsets'] , _snake_case )
def _a ( self : Optional[Any] ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A__ = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
A__ = F'''{text_of_1_token} {text_of_1_token}'''
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_snake_case ) + 1, len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_snake_case ) + 1, len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_snake_case ), len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_snake_case ), len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_snake_case ) + 1, 1 + len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_snake_case ), 1 + len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_snake_case ), 1 + len(_snake_case ) + 1 + len(_snake_case )) , )
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def A ( __UpperCamelCase , __UpperCamelCase ) -> Optional[int]:
A__ = checkpoint
A__ = {}
A__ = vae_state_dict['encoder.conv_in.weight']
A__ = vae_state_dict['encoder.conv_in.bias']
A__ = vae_state_dict['encoder.conv_out.weight']
A__ = vae_state_dict['encoder.conv_out.bias']
A__ = vae_state_dict['encoder.norm_out.weight']
A__ = vae_state_dict['encoder.norm_out.bias']
A__ = vae_state_dict['decoder.conv_in.weight']
A__ = vae_state_dict['decoder.conv_in.bias']
A__ = vae_state_dict['decoder.conv_out.weight']
A__ = vae_state_dict['decoder.conv_out.bias']
A__ = vae_state_dict['decoder.norm_out.weight']
A__ = vae_state_dict['decoder.norm_out.bias']
A__ = vae_state_dict['quant_conv.weight']
A__ = vae_state_dict['quant_conv.bias']
A__ = vae_state_dict['post_quant_conv.weight']
A__ = vae_state_dict['post_quant_conv.bias']
# Retrieves the keys for the encoder down blocks only
A__ = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'encoder.down' in layer} )
A__ = {
layer_id: [key for key in vae_state_dict if f'''down.{layer_id}''' in key] for layer_id in range(__UpperCamelCase )
}
# Retrieves the keys for the decoder up blocks only
A__ = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'decoder.up' in layer} )
A__ = {
layer_id: [key for key in vae_state_dict if f'''up.{layer_id}''' in key] for layer_id in range(__UpperCamelCase )
}
for i in range(__UpperCamelCase ):
A__ = [key for key in down_blocks[i] if f'''down.{i}''' in key and f'''down.{i}.downsample''' not in key]
if f'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict:
A__ = vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.weight''' )
A__ = vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.bias''' )
A__ = renew_vae_resnet_paths(__UpperCamelCase )
A__ = {'old': f'''down.{i}.block''', 'new': f'''down_blocks.{i}.resnets'''}
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase )
A__ = [key for key in vae_state_dict if 'encoder.mid.block' in key]
A__ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
A__ = [key for key in mid_resnets if f'''encoder.mid.block_{i}''' in key]
A__ = renew_vae_resnet_paths(__UpperCamelCase )
A__ = {'old': f'''mid.block_{i}''', 'new': f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase )
A__ = [key for key in vae_state_dict if 'encoder.mid.attn' in key]
A__ = renew_vae_attention_paths(__UpperCamelCase )
A__ = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase )
conv_attn_to_linear(__UpperCamelCase )
for i in range(__UpperCamelCase ):
A__ = num_up_blocks - 1 - i
A__ = [
key for key in up_blocks[block_id] if f'''up.{block_id}''' in key and f'''up.{block_id}.upsample''' not in key
]
if f'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict:
A__ = vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.weight'''
]
A__ = vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.bias'''
]
A__ = renew_vae_resnet_paths(__UpperCamelCase )
A__ = {'old': f'''up.{block_id}.block''', 'new': f'''up_blocks.{i}.resnets'''}
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase )
A__ = [key for key in vae_state_dict if 'decoder.mid.block' in key]
A__ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
A__ = [key for key in mid_resnets if f'''decoder.mid.block_{i}''' in key]
A__ = renew_vae_resnet_paths(__UpperCamelCase )
A__ = {'old': f'''mid.block_{i}''', 'new': f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase )
A__ = [key for key in vae_state_dict if 'decoder.mid.attn' in key]
A__ = renew_vae_attention_paths(__UpperCamelCase )
A__ = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase )
conv_attn_to_linear(__UpperCamelCase )
return new_checkpoint
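# Illustrative sketch (hypothetical helper, not the diffusers implementation):
# each {'old': ..., 'new': ...} meta_path above boils down to substring
# renaming applied to every matching checkpoint key.
def _example_rename_key(key, old, new):
    # e.g. _example_rename_key('encoder.down.0.block.1.conv1.weight',
    #                          'down.0.block', 'down_blocks.0.resnets')
    # -> 'encoder.down_blocks.0.resnets.1.conv1.weight'
    return key.replace(old, new)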
def A ( __UpperCamelCase , __UpperCamelCase , ) -> Tuple:
# Only support V1
    A__ = requests.get(
        'https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml' )
A__ = io.BytesIO(r.content )
A__ = OmegaConf.load(__UpperCamelCase )
A__ = 512
A__ = 'cuda' if torch.cuda.is_available() else 'cpu'
if checkpoint_path.endswith('safetensors' ):
from safetensors import safe_open
A__ = {}
with safe_open(__UpperCamelCase , framework='pt' , device='cpu' ) as f:
for key in f.keys():
A__ = f.get_tensor(__UpperCamelCase )
else:
A__ = torch.load(__UpperCamelCase , map_location=__UpperCamelCase )['state_dict']
# Convert the VAE model.
A__ = create_vae_diffusers_config(__UpperCamelCase , image_size=__UpperCamelCase )
A__ = custom_convert_ldm_vae_checkpoint(__UpperCamelCase , __UpperCamelCase )
A__ = AutoencoderKL(**__UpperCamelCase )
vae.load_state_dict(__UpperCamelCase )
vae.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to store the converted VAE in diffusers format.''')
SCREAMING_SNAKE_CASE__ = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
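    # Example invocation (script name and paths are hypothetical):
    #   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.ckpt --dump_path ./vae-diffusers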
import pytest
import datasets
# Import fixture modules as plugins
SCREAMING_SNAKE_CASE__ = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def A ( __UpperCamelCase , __UpperCamelCase ) -> Optional[int]:
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ['integration', 'unit'] ):
continue
item.add_marker(pytest.mark.unit )
def A ( __UpperCamelCase ) -> str:
config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' )
@pytest.fixture(autouse=__UpperCamelCase )
def A ( __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]:
# test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work?
A__ = tmp_path_factory.getbasetemp() / 'cache'
A__ = test_hf_cache_home / 'datasets'
A__ = test_hf_cache_home / 'metrics'
A__ = test_hf_cache_home / 'modules'
monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(__UpperCamelCase ) )
monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(__UpperCamelCase ) )
monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(__UpperCamelCase ) )
A__ = test_hf_datasets_cache / 'downloads'
monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(__UpperCamelCase ) )
A__ = test_hf_datasets_cache / 'downloads' / 'extracted'
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(__UpperCamelCase ) )
@pytest.fixture(autouse=__UpperCamelCase , scope='session' )
def A ( ) -> Union[str, Any]:
datasets.disable_progress_bar()
@pytest.fixture(autouse=__UpperCamelCase )
def A ( __UpperCamelCase ) -> int:
# don't take tests into account when counting downloads
monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , __UpperCamelCase )
@pytest.fixture
def A ( __UpperCamelCase ) -> Any:
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
# To be removed once SQLAlchemy 2.0 supported
monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , __UpperCamelCase )
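# With the collection hook above, subsets can be selected by marker, e.g.:
#   pytest -m unit           # everything not explicitly marked "integration"
#   pytest -m integration    # integration tests only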
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : int = "distilbert"
A__ : Tuple = {
"hidden_size": "dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
}
def __init__( self : Optional[Any] , _snake_case : Tuple=3_05_22 , _snake_case : int=5_12 , _snake_case : Optional[Any]=False , _snake_case : List[Any]=6 , _snake_case : Any=12 , _snake_case : Tuple=7_68 , _snake_case : Tuple=4 * 7_68 , _snake_case : int=0.1 , _snake_case : Dict=0.1 , _snake_case : str="gelu" , _snake_case : List[Any]=0.02 , _snake_case : Union[str, Any]=0.1 , _snake_case : str=0.2 , _snake_case : Union[str, Any]=0 , **_snake_case : List[Any] , ):
"""simple docstring"""
A__ = vocab_size
A__ = max_position_embeddings
A__ = sinusoidal_pos_embds
A__ = n_layers
A__ = n_heads
A__ = dim
A__ = hidden_dim
A__ = dropout
A__ = attention_dropout
A__ = activation
A__ = initializer_range
A__ = qa_dropout
A__ = seq_classif_dropout
super().__init__(**_snake_case , pad_token_id=_snake_case )
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
@property
def _a ( self : Optional[int] ):
"""simple docstring"""
if self.task == "multiple-choice":
A__ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
A__ = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
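# For any task other than multiple-choice, the property above resolves to:
#   OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#                ("attention_mask", {0: "batch", 1: "sequence"})])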
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def A ( __UpperCamelCase , __UpperCamelCase ) -> Tuple:
A__ = args.log_outputs
A__ = '_'.join(args.dataset.split('/' ) + [args.config, args.split] )
# load metric
A__ = load_metric('wer' )
A__ = load_metric('cer' )
# compute metrics
A__ = wer.compute(references=result['target'] , predictions=result['prediction'] )
A__ = cer.compute(references=result['target'] , predictions=result['prediction'] )
# print & log results
A__ = f'''WER: {wer_result}\nCER: {cer_result}'''
print(__UpperCamelCase )
with open(f'''{dataset_id}_eval_results.txt''' , 'w' ) as f:
f.write(__UpperCamelCase )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
A__ = f'''log_{dataset_id}_predictions.txt'''
A__ = f'''log_{dataset_id}_targets.txt'''
with open(__UpperCamelCase , 'w' ) as p, open(__UpperCamelCase , 'w' ) as t:
# mapping function to write output
def write_to_file(__UpperCamelCase , __UpperCamelCase ):
p.write(f'''{i}''' + '\n' )
p.write(batch['prediction'] + '\n' )
t.write(f'''{i}''' + '\n' )
t.write(batch['target'] + '\n' )
result.map(__UpperCamelCase , with_indices=__UpperCamelCase )
def A ( __UpperCamelCase ) -> str:
A__ = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
A__ = re.sub(__UpperCamelCase , '' , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
A__ = ['\n\n', '\n', ' ', ' ']
for t in token_sequences_to_ignore:
A__ = ' '.join(text.split(__UpperCamelCase ) )
return text
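# Example: 'Hello, world!\n\nIt is me.' normalizes to 'hello world it is me'
# (punctuation stripped, lowercased, newline runs collapsed to single spaces).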
def A ( __UpperCamelCase ) -> Union[str, Any]:
# load dataset
A__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=__UpperCamelCase )
    # for testing: uncomment the next line to only process the first ten examples
    # dataset = dataset.select(range(10))
# load processor
A__ = AutoFeatureExtractor.from_pretrained(args.model_id )
A__ = feature_extractor.sampling_rate
# resample audio
A__ = dataset.cast_column('audio' , Audio(sampling_rate=__UpperCamelCase ) )
# load eval pipeline
if args.device is None:
A__ = 0 if torch.cuda.is_available() else -1
A__ = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(__UpperCamelCase ):
A__ = asr(
batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
A__ = prediction['text']
A__ = normalize_text(batch['sentence'] )
return batch
# run inference on all examples
A__ = dataset.map(__UpperCamelCase , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(__UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
)
parser.add_argument(
'''--dataset''',
type=str,
required=True,
help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
)
parser.add_argument(
'''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
)
parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
parser.add_argument(
'''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
)
parser.add_argument(
'''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
)
parser.add_argument(
'''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
)
parser.add_argument(
'''--device''',
type=int,
default=None,
help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
main(args)
from collections import defaultdict
def A ( __UpperCamelCase ) -> int:
A__ = 1
A__ = True
for v in tree[start]:
if v not in visited:
ret += dfs(__UpperCamelCase )
if ret % 2 == 0:
cuts.append(__UpperCamelCase )
return ret
def A ( ) -> Dict:
dfs(1 )
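# Self-contained sketch of the same algorithm (even-tree cut counting) without
# the module-level globals above: every non-root vertex whose subtree has an
# even number of nodes marks one removable edge. For the edge list used in
# __main__ below this returns 2, matching print(len(cuts) - 1).
def _example_even_tree_cuts(edge_list, root=1):
    from collections import defaultdict
    adj = defaultdict(list)
    for u, v in edge_list:
        adj[u].append(v)
        adj[v].append(u)
    cuts = 0
    seen = {root}
    def subtree_size(node):
        nonlocal cuts
        total = 1
        for nxt in adj[node]:
            if nxt not in seen:
                seen.add(nxt)
                total += subtree_size(nxt)
        if node != root and total % 2 == 0:
            cuts += 1
        return total
    subtree_size(root)
    return cuts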
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 1_0, 9
SCREAMING_SNAKE_CASE__ = defaultdict(list)
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (1_0, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def A ( __UpperCamelCase ) -> YolosConfig:
A__ = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
A__ = 192
A__ = 768
A__ = 12
A__ = 3
A__ = [800, 1_333]
A__ = False
elif yolos_name == "yolos_s_dWr":
A__ = 330
A__ = 14
A__ = 6
A__ = 1_320
elif "yolos_s" in yolos_name:
A__ = 384
A__ = 1_536
A__ = 12
A__ = 6
elif "yolos_b" in yolos_name:
A__ = [800, 1_344]
A__ = 91
A__ = 'huggingface/label-files'
A__ = 'coco-detection-id2label.json'
A__ = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='dataset' ) , 'r' ) )
A__ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
return config
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False ) -> str:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
A__ = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
A__ = in_proj_weight[: config.hidden_size, :]
A__ = in_proj_bias[: config.hidden_size]
A__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ = in_proj_weight[-config.hidden_size :, :]
A__ = in_proj_bias[-config.hidden_size :]
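# Note: the fused timm qkv matrix has shape (3 * hidden_size, hidden_size);
# the three equal slices taken above become the separate query, key and value
# projections of the Hugging Face attention module.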
def A ( __UpperCamelCase ) -> str:
if "backbone" in name:
A__ = name.replace('backbone' , 'vit' )
if "cls_token" in name:
A__ = name.replace('cls_token' , 'embeddings.cls_token' )
if "det_token" in name:
A__ = name.replace('det_token' , 'embeddings.detection_tokens' )
if "mid_pos_embed" in name:
A__ = name.replace('mid_pos_embed' , 'encoder.mid_position_embeddings' )
if "pos_embed" in name:
A__ = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
A__ = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "blocks" in name:
A__ = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
A__ = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
A__ = name.replace('attn' , 'attention.self' )
if "norm1" in name:
A__ = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
A__ = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
A__ = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
A__ = name.replace('mlp.fc2' , 'output.dense' )
if "class_embed" in name:
A__ = name.replace('class_embed' , 'class_labels_classifier' )
if "bbox_embed" in name:
A__ = name.replace('bbox_embed' , 'bbox_predictor' )
if "vit.norm" in name:
A__ = name.replace('vit.norm' , 'vit.layernorm' )
return name
def A ( __UpperCamelCase , __UpperCamelCase ) -> dict:
for key in orig_state_dict.copy().keys():
A__ = orig_state_dict.pop(__UpperCamelCase )
if "qkv" in key:
A__ = key.split('.' )
A__ = int(key_split[2] )
A__ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
A__ = val[:dim, :]
A__ = val[
dim : dim * 2, :
]
A__ = val[-dim:, :]
else:
A__ = val[:dim]
A__ = val[dim : dim * 2]
A__ = val[-dim:]
else:
A__ = val
return orig_state_dict
def A ( ) -> torch.Tensor:
A__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A__ = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False ) -> List[str]:
A__ = get_yolos_config(__UpperCamelCase )
# load original state_dict
A__ = torch.load(__UpperCamelCase , map_location='cpu' )['model']
# load 🤗 model
A__ = YolosForObjectDetection(__UpperCamelCase )
model.eval()
A__ = convert_state_dict(__UpperCamelCase , __UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
# Check outputs on an image, prepared by YolosImageProcessor
A__ = 800 if yolos_name != 'yolos_ti' else 512
A__ = YolosImageProcessor(format='coco_detection' , size=__UpperCamelCase )
A__ = image_processor(images=prepare_img() , return_tensors='pt' )
A__ = model(**__UpperCamelCase )
A__ , A__ = outputs.logits, outputs.pred_boxes
A__ , A__ = None, None
if yolos_name == "yolos_ti":
A__ = torch.tensor(
[[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
A__ = torch.tensor(
[[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
elif yolos_name == "yolos_s_200_pre":
A__ = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
A__ = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
elif yolos_name == "yolos_s_300_pre":
A__ = torch.tensor(
[[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
A__ = torch.tensor(
[[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
elif yolos_name == "yolos_s_dWr":
A__ = torch.tensor(
[[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
A__ = torch.tensor(
[[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
elif yolos_name == "yolos_base":
A__ = torch.tensor(
[[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
A__ = torch.tensor(
[[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
else:
raise ValueError(f'''Unknown yolos_name: {yolos_name}''' )
assert torch.allclose(logits[0, :3, :3] , __UpperCamelCase , atol=1E-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , __UpperCamelCase , atol=1E-4 )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(f'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCamelCase )
if push_to_hub:
A__ = {
'yolos_ti': 'yolos-tiny',
'yolos_s_200_pre': 'yolos-small',
'yolos_s_300_pre': 'yolos-small-300',
'yolos_s_dWr': 'yolos-small-dwr',
'yolos_base': 'yolos-base',
}
print('Pushing to the hub...' )
A__ = model_mapping[yolos_name]
image_processor.push_to_hub(__UpperCamelCase , organization='hustvl' )
model.push_to_hub(__UpperCamelCase , organization='hustvl' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
from random import randint, random
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = 5 , ) -> list:
A__ = [[-1] * number_of_cells] # Create a highway without any car
A__ = 0
A__ = max(__UpperCamelCase , 0 )
while i < number_of_cells:
A__ = (
randint(0 , __UpperCamelCase ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def A ( __UpperCamelCase , __UpperCamelCase ) -> int:
A__ = 0
A__ = highway_now[car_index + 1 :]
for cell in range(len(__UpperCamelCase ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(__UpperCamelCase , -1 )
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> list:
A__ = len(__UpperCamelCase )
    # Before calculations, the highway is empty
A__ = [-1] * number_of_cells
for car_index in range(__UpperCamelCase ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
A__ = min(highway_now[car_index] + 1 , __UpperCamelCase )
# Number of empty cell before the next car
A__ = get_distance(__UpperCamelCase , __UpperCamelCase ) - 1
# We can't have the car causing an accident
A__ = min(next_highway[car_index] , __UpperCamelCase )
if random() < probability:
# Randomly, a driver will slow down
A__ = max(next_highway[car_index] - 1 , 0 )
return next_highway
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> list:
A__ = len(highway[0] )
for i in range(__UpperCamelCase ):
A__ = update(highway[i] , __UpperCamelCase , __UpperCamelCase )
A__ = [-1] * number_of_cells
for car_index in range(__UpperCamelCase ):
A__ = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
A__ = (car_index + speed) % number_of_cells
# Commit the change of position
A__ = speed
highway.append(__UpperCamelCase )
return highway
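# Minimal standalone sketch of one Nagel-Schreckenberg speed update (the three
# rules applied per car above); a hypothetical helper, independent of the
# obfuscated functions in this file.
def _example_nasch_speed(speed, gap, max_speed, dawdle_probability):
    speed = min(speed + 1, max_speed)  # rule 1: accelerate
    speed = min(speed, gap)  # rule 2: brake to the free gap ahead
    if random() < dawdle_probability:  # rule 3: random slowdown
        speed = max(speed - 1, 0)
    return speed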
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import TYPE_CHECKING
from ..utils import _LazyModule
SCREAMING_SNAKE_CASE__ = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
SCREAMING_SNAKE_CASE__ = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def A ( __UpperCamelCase , __UpperCamelCase=None ) -> Dict:
require_version(deps[pkg] , __UpperCamelCase )
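# require_version raises when the installed package does not satisfy the pin,
# e.g. (illustrative):
#   require_version("numpy>=1.17", "pip install -U numpy")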
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
SCREAMING_SNAKE_CASE__ = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
SCREAMING_SNAKE_CASE__ = {
'''google/rembert''': 2_5_6,
}
SCREAMING_SNAKE_CASE__ = '''▁'''
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : Any = VOCAB_FILES_NAMES
A__ : str = PRETRAINED_VOCAB_FILES_MAP
A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : int = RemBertTokenizer
def __init__( self : Union[str, Any] , _snake_case : Any=None , _snake_case : Optional[Any]=None , _snake_case : Any=True , _snake_case : Optional[int]=True , _snake_case : Dict=False , _snake_case : Dict="[CLS]" , _snake_case : List[Any]="[SEP]" , _snake_case : Union[str, Any]="<unk>" , _snake_case : List[str]="[SEP]" , _snake_case : List[str]="<pad>" , _snake_case : str="[CLS]" , _snake_case : Any="[MASK]" , **_snake_case : Any , ):
"""simple docstring"""
A__ = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else mask_token
super().__init__(
_snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , remove_space=_snake_case , keep_accents=_snake_case , bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , **_snake_case , )
A__ = do_lower_case
A__ = remove_space
A__ = keep_accents
A__ = vocab_file
        A__ = bool(self.vocab_file )
def _a ( self : Any , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ):
"""simple docstring"""
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
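    # The layout built above is "[CLS] A [SEP]" for a single sequence and
    # "[CLS] A [SEP] B [SEP]" for a pair; the token-type method below assigns
    # segment id 0 to the first span and 1 to the second.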
def _a ( self : Tuple , _snake_case : List[int] , _snake_case : Optional[List[int]] = None , _snake_case : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_snake_case )) + [1] + ([0] * len(_snake_case )) + [1]
return [1] + ([0] * len(_snake_case )) + [1]
def _a ( self : Dict , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ):
"""simple docstring"""
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _a ( self : Any , _snake_case : str , _snake_case : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(_snake_case ):
logger.error('Vocabulary path ({}) should be a directory'.format(_snake_case ) )
return
A__ = os.path.join(
_snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ):
copyfile(self.vocab_file , _snake_case )
return (out_vocab_file,)
import requests
SCREAMING_SNAKE_CASE__ = '''''' # <-- Put your OpenWeatherMap appid here!
SCREAMING_SNAKE_CASE__ = '''https://api.openweathermap.org/data/2.5/'''
def A ( __UpperCamelCase = "Chicago" , __UpperCamelCase = APPID ) -> dict:
return requests.get(URL_BASE + 'weather' , params=locals() ).json()
def A ( __UpperCamelCase = "Kolkata, India" , __UpperCamelCase = APPID ) -> dict:
return requests.get(URL_BASE + 'forecast' , params=locals() ).json()
def A ( __UpperCamelCase = 55.68 , __UpperCamelCase = 12.57 , __UpperCamelCase = APPID ) -> dict:
return requests.get(URL_BASE + 'onecall' , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
SCREAMING_SNAKE_CASE__ = input('''Enter a location:''').strip()
if location:
pprint(current_weather(location))
else:
break
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
SCREAMING_SNAKE_CASE__ = '''sshleifer/bart-tiny-random'''
SCREAMING_SNAKE_CASE__ = '''patrickvonplaten/t5-tiny-random'''
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : Optional[int] ):
"""simple docstring"""
return AutoConfig.from_pretrained(_snake_case )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=_snake_case )
def _a ( self : int ):
"""simple docstring"""
A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=_snake_case )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def _a ( self : str ):
"""simple docstring"""
A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def _a ( self : str ):
"""simple docstring"""
with self.assertRaises(_snake_case ):
create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=_snake_case , d=_snake_case )
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE__ = logging.getLogger()
SCREAMING_SNAKE_CASE__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def _a ( self : Optional[int] , _snake_case : int ):
"""simple docstring"""
os.makedirs(_snake_case , exist_ok=_snake_case )
A__ = {'source': 'What is love ?', 'target': 'life'}
A__ = {'train': 12, 'val': 2, 'test': 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
A__ = '\n'.join([contents[field]] * n_lines[split] )
with open(os.path.join(_snake_case , F'''{split}.{field}''' ) , 'w' ) as f:
f.write(_snake_case )
def _a ( self : Union[str, Any] , _snake_case : int , _snake_case : str = "pytorch" ):
"""simple docstring"""
A__ = self.get_auto_remove_tmp_dir()
A__ = os.path.join(_snake_case , 'output' )
A__ = os.path.join(_snake_case , 'data' )
self._create_dummy_data(data_dir=_snake_case )
A__ = F'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
if gpus > 0:
testargs.append(F'''--gpus={gpus}''' )
if is_apex_available():
testargs.append('--fp16' )
else:
testargs.append('--gpus=0' )
testargs.append('--distributed_backend=ddp_cpu' )
testargs.append('--num_processes=2' )
A__ = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(_snake_case , env=self.get_env() )
A__ = os.path.join(_snake_case , 'metrics.json' )
with open(_snake_case ) as f:
A__ = json.load(_snake_case )
return result
@require_torch_gpu
def _a ( self : Dict ):
"""simple docstring"""
A__ = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
def _a ( self : Tuple ):
"""simple docstring"""
A__ = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_gpu
@require_ray
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = self._run_finetune(gpus=1 , distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
@require_ray
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = self._run_finetune(gpus=1 , distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : Union[str, Any] = ["image_processor", "tokenizer"]
A__ : Optional[Any] = "BridgeTowerImageProcessor"
A__ : List[Any] = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self : List[Any] , _snake_case : Optional[Any] , _snake_case : Optional[int] ):
"""simple docstring"""
super().__init__(_snake_case , _snake_case )
def __call__( self : List[Any] , _snake_case : int , _snake_case : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _snake_case : bool = True , _snake_case : Union[bool, str, PaddingStrategy] = False , _snake_case : Union[bool, str, TruncationStrategy] = None , _snake_case : Optional[int] = None , _snake_case : int = 0 , _snake_case : Optional[int] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[bool] = None , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = True , _snake_case : Optional[Union[str, TensorType]] = None , **_snake_case : Optional[int] , ):
"""simple docstring"""
A__ = self.tokenizer(
text=_snake_case , add_special_tokens=_snake_case , padding=_snake_case , truncation=_snake_case , max_length=_snake_case , stride=_snake_case , pad_to_multiple_of=_snake_case , return_token_type_ids=_snake_case , return_attention_mask=_snake_case , return_overflowing_tokens=_snake_case , return_special_tokens_mask=_snake_case , return_offsets_mapping=_snake_case , return_length=_snake_case , verbose=_snake_case , return_tensors=_snake_case , **_snake_case , )
# add pixel_values + pixel_mask
A__ = self.image_processor(
_snake_case , return_tensors=_snake_case , do_normalize=_snake_case , do_center_crop=_snake_case , **_snake_case )
encoding.update(_snake_case )
return encoding
def _a ( self : Any , *_snake_case : Tuple , **_snake_case : List[Any] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*_snake_case , **_snake_case )
def _a ( self : Dict , *_snake_case : Dict , **_snake_case : List[str] ):
"""simple docstring"""
return self.tokenizer.decode(*_snake_case , **_snake_case )
@property
def _a ( self : Tuple ):
"""simple docstring"""
A__ = self.tokenizer.model_input_names
A__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
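# Typical usage (checkpoint name is only an example):
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   encoding = processor(images=image, text="a photo of a cat", return_tensors="pt")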
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ['''XLMRobertaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ['''XLMRobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
SCREAMING_SNAKE_CASE__ = 8.314_462 # Unit - J mol-1 K-1
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> float:
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError('Invalid inputs. Enter positive value.' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> float:
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError('Invalid inputs. Enter positive value.' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
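# Worked example (PV = nRT): 1 mol at 273.15 K in 0.0224 m^3 gives
#   1 * 273.15 * 8.314462 / 0.0224 ≈ 1.01e5 Pa, i.e. roughly 1 atm.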
if __name__ == "__main__":
from doctest import testmod
testmod()
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score # noqa: E402 # isort:skip
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def A ( __UpperCamelCase ) -> Tuple:
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[Any]:
return max(metric_fn(__UpperCamelCase , __UpperCamelCase ) for gt in ground_truths )
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[str]:
A__ = [line.strip() for line in open(__UpperCamelCase , 'r' ).readlines()]
A__ = []
if args.gold_data_mode == "qa":
A__ = pd.read_csv(__UpperCamelCase , sep='\t' , header=__UpperCamelCase )
for answer_list in data[1]:
A__ = ast.literal_eval(__UpperCamelCase )
answers.append(__UpperCamelCase )
else:
A__ = [line.strip() for line in open(__UpperCamelCase , 'r' ).readlines()]
A__ = [[reference] for reference in references]
A__ = A__ = A__ = 0
for prediction, ground_truths in zip(__UpperCamelCase , __UpperCamelCase ):
total += 1
em += metric_max_over_ground_truths(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
fa += metric_max_over_ground_truths(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
A__ = 100.0 * em / total
A__ = 100.0 * fa / total
logger.info(f'''F1: {fa:.2f}''' )
logger.info(f'''EM: {em:.2f}''' )
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[int]:
A__ = args.k
A__ = [line.strip() for line in open(__UpperCamelCase , 'r' ).readlines()]
A__ = [line.strip() for line in open(__UpperCamelCase , 'r' ).readlines()]
A__ = A__ = 0
for hypo, reference in zip(__UpperCamelCase , __UpperCamelCase ):
A__ = set(hypo.split('\t' )[:k] )
A__ = set(reference.split('\t' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
A__ = 100.0 * em / total
logger.info(f'''Precision@{k}: {em: .2f}''' )
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]:
def strip_title(__UpperCamelCase ):
if title.startswith('"' ):
A__ = title[1:]
if title.endswith('"' ):
A__ = title[:-1]
return title
A__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
__UpperCamelCase , return_tensors='pt' , padding=__UpperCamelCase , truncation=__UpperCamelCase , )['input_ids'].to(args.device )
A__ = rag_model.rag.question_encoder(__UpperCamelCase )
A__ = question_enc_outputs[0]
A__ = rag_model.retriever(
        __UpperCamelCase , question_enc_pool_output.cpu().detach().to(torch.float32 ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='pt' , )
A__ = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
A__ = []
for docs in all_docs:
A__ = [strip_title(__UpperCamelCase ) for title in docs['title']]
provenance_strings.append('\t'.join(__UpperCamelCase ) )
return provenance_strings
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]:
with torch.no_grad():
A__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
__UpperCamelCase , return_tensors='pt' , padding=__UpperCamelCase , truncation=__UpperCamelCase )
A__ = inputs_dict.input_ids.to(args.device )
A__ = inputs_dict.attention_mask.to(args.device )
A__ = rag_model.generate( # rag_model overwrites generate
__UpperCamelCase , attention_mask=__UpperCamelCase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__UpperCamelCase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
A__ = rag_model.retriever.generator_tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
if args.print_predictions:
for q, a in zip(__UpperCamelCase , __UpperCamelCase ):
logger.info('Q: {} - A: {}'.format(__UpperCamelCase , __UpperCamelCase ) )
return answers
def A ( ) -> Any:
A__ = argparse.ArgumentParser()
parser.add_argument(
'--model_type' , choices=['rag_sequence', 'rag_token', 'bart'] , type=__UpperCamelCase , help=(
'RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'
' model_name_or_path'
) , )
parser.add_argument(
'--index_name' , default=__UpperCamelCase , choices=['exact', 'compressed', 'legacy'] , type=__UpperCamelCase , help='RAG model retriever type' , )
parser.add_argument(
'--index_path' , default=__UpperCamelCase , type=__UpperCamelCase , help='Path to the retrieval index' , )
parser.add_argument('--n_docs' , default=5 , type=__UpperCamelCase , help='Number of retrieved docs' )
parser.add_argument(
'--model_name_or_path' , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help='Path to pretrained checkpoints or model identifier from huggingface.co/models' , )
parser.add_argument(
'--eval_mode' , choices=['e2e', 'retrieval'] , default='e2e' , type=__UpperCamelCase , help=(
'Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'
' precision@k.'
) , )
parser.add_argument('--k' , default=1 , type=__UpperCamelCase , help='k for the precision@k calculation' )
parser.add_argument(
'--evaluation_set' , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help='Path to a file containing evaluation samples' , )
parser.add_argument(
'--gold_data_path' , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help='Path to a tab-separated file with gold samples' , )
parser.add_argument(
'--gold_data_mode' , default='qa' , type=__UpperCamelCase , choices=['qa', 'ans'] , help=(
'Format of the gold data file'
'qa - a single line in the following format: question [tab] answer_list'
'ans - a single line of the gold file contains the expected answer string'
) , )
parser.add_argument(
'--predictions_path' , type=__UpperCamelCase , default='predictions.txt' , help='Name of the predictions file, to be stored in the checkpoints directory' , )
parser.add_argument(
        '--eval_all_checkpoints' , action='store_true' , help='Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number' , )
parser.add_argument(
'--eval_batch_size' , default=8 , type=__UpperCamelCase , help='Batch size per GPU/CPU for evaluation.' , )
parser.add_argument(
'--recalculate' , help='Recalculate predictions even if the prediction file exists' , action='store_true' , )
parser.add_argument(
'--num_beams' , default=4 , type=__UpperCamelCase , help='Number of beams to be used when generating answers' , )
parser.add_argument('--min_length' , default=1 , type=__UpperCamelCase , help='Min length of the generated answers' )
parser.add_argument('--max_length' , default=50 , type=__UpperCamelCase , help='Max length of the generated answers' )
parser.add_argument(
'--print_predictions' , action='store_true' , help='If True, prints predictions while evaluating.' , )
parser.add_argument(
        '--print_docs' , action='store_true' , help='If True, prints docs retrieved while generating.' , )
A__ = parser.parse_args()
A__ = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
return args
def A ( __UpperCamelCase ) -> int:
A__ = {}
if args.model_type is None:
A__ = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('rag' ):
A__ = RagTokenForGeneration if args.model_type == 'rag_token' else RagSequenceForGeneration
A__ = args.n_docs
if args.index_name is not None:
A__ = args.index_name
if args.index_path is not None:
A__ = args.index_path
else:
A__ = BartForConditionalGeneration
A__ = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('Evaluate the following checkpoints: %s' , __UpperCamelCase )
A__ = get_scores if args.eval_mode == 'e2e' else get_precision_at_k
A__ = evaluate_batch_eae if args.eval_mode == 'e2e' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path ) )
score_fn(__UpperCamelCase , args.predictions_path , args.gold_data_path )
continue
logger.info('***** Running evaluation for {} *****'.format(__UpperCamelCase ) )
logger.info(' Batch size = %d' , args.eval_batch_size )
logger.info(' Predictions will be stored under {}'.format(args.predictions_path ) )
if args.model_type.startswith('rag' ):
A__ = RagRetriever.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
A__ = model_class.from_pretrained(__UpperCamelCase , retriever=__UpperCamelCase , **__UpperCamelCase )
model.retriever.init_retrieval()
else:
A__ = model_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
model.to(args.device )
with open(args.evaluation_set , 'r' ) as eval_file, open(args.predictions_path , 'w' ) as preds_file:
A__ = []
for line in tqdm(__UpperCamelCase ):
questions.append(line.strip() )
if len(__UpperCamelCase ) == args.eval_batch_size:
A__ = evaluate_batch_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
preds_file.write('\n'.join(__UpperCamelCase ) + '\n' )
preds_file.flush()
A__ = []
if len(__UpperCamelCase ) > 0:
A__ = evaluate_batch_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
preds_file.write('\n'.join(__UpperCamelCase ) )
preds_file.flush()
score_fn(__UpperCamelCase , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = get_args()
main(args)
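    # Example invocation (file paths are hypothetical):
    #   python eval_rag.py --model_name_or_path facebook/rag-sequence-nq \
    #       --evaluation_set ./test.source --gold_data_path ./test.target \
    #       --gold_data_mode ans --predictions_path ./preds.txt --eval_mode e2e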
SCREAMING_SNAKE_CASE__ = 0 # The first color of the flag.
SCREAMING_SNAKE_CASE__ = 1 # The second color of the flag.
SCREAMING_SNAKE_CASE__ = 2 # The third color of the flag.
SCREAMING_SNAKE_CASE__ = (red, white, blue)
def A ( __UpperCamelCase ) -> list:
if not sequence:
return []
if len(__UpperCamelCase ) == 1:
return list(__UpperCamelCase )
A__ = 0
A__ = len(__UpperCamelCase ) - 1
A__ = 0
while mid <= high:
if sequence[mid] == colors[0]:
A__ , A__ = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
A__ , A__ = sequence[high], sequence[mid]
high -= 1
else:
A__ = f'''The elements inside the sequence must contains only {colors} values'''
raise ValueError(__UpperCamelCase )
return sequence
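# Example: the single-pass 3-way partition above sorts [2, 0, 1, 0, 2]
# into [0, 0, 1, 2, 2].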
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE__ = input('''Enter numbers separated by commas:\n''').strip()
SCREAMING_SNAKE_CASE__ = [int(item.strip()) for item in user_input.split(''',''')]
print(f'{dutch_national_flag_sort(unsorted)}')
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] , _snake_case : Any , _snake_case : Optional[int]=13 , _snake_case : Optional[Any]=64 , _snake_case : List[str]=2 , _snake_case : Any=3 , _snake_case : Union[str, Any]=True , _snake_case : Dict=True , _snake_case : int=32 , _snake_case : int=5 , _snake_case : Union[str, Any]=4 , _snake_case : int=37 , _snake_case : Tuple="gelu" , _snake_case : Optional[int]=0.1 , _snake_case : Dict=0.1 , _snake_case : List[str]=10 , _snake_case : Union[str, Any]=0.02 , _snake_case : Dict=[1, 16, 4, 4] , _snake_case : Dict=None , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = image_size
A__ = patch_size
A__ = num_channels
A__ = is_training
A__ = use_labels
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = type_sequence_label_size
A__ = initializer_range
A__ = scope
A__ = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
A__ = (self.image_size // 32) ** 2
A__ = num_patches + 1
def _a ( self : Any ):
"""simple docstring"""
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = self.get_config()
return config, pixel_values, labels
def _a ( self : Tuple ):
"""simple docstring"""
A__ = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_snake_case , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=_snake_case , )
def _a ( self : int , _snake_case : Optional[int] , _snake_case : Union[str, Any] , _snake_case : Optional[int] ):
"""simple docstring"""
A__ = ViTHybridModel(config=_snake_case )
model.to(_snake_case )
model.eval()
A__ = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : List[str] , _snake_case : str , _snake_case : Union[str, Any] , _snake_case : Any ):
"""simple docstring"""
A__ = self.type_sequence_label_size
A__ = ViTHybridForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
A__ = model(_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self : Dict ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Union[str, Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
A__ : str = (
{"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
A__ : Union[str, Any] = False
A__ : Any = False
A__ : Union[str, Any] = False
def _a ( self : Dict ):
"""simple docstring"""
A__ = ViTHybridModelTester(self )
A__ = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37 )
def _a ( self : int ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def _a ( self : int ):
"""simple docstring"""
pass
def _a ( self : int ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(_snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case , nn.Linear ) )
def _a ( self : List[str] ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(_snake_case )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _snake_case )
def _a ( self : Any ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def _a ( self : str ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
def _a ( self : Any ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = _config_zero_init(_snake_case )
for model_class in self.all_model_classes:
A__ = model_class(config=_snake_case )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
A__ = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def _a ( self : int ):
"""simple docstring"""
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = ViTHybridModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def A ( ) -> Union[str, Any]:
A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : Tuple ):
"""simple docstring"""
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_snake_case )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=_snake_case , return_tensors='pt' ).to(_snake_case )
# forward pass
with torch.no_grad():
A__ = model(**_snake_case )
# verify the logits
A__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _snake_case )
A__ = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1E-4 ) )
@slow
@require_accelerate
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
A__ = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
A__ = prepare_img()
A__ = image_processor(images=_snake_case , return_tensors='pt' )
A__ = model(**_snake_case )
A__ = outputs.logits
# model predicts one of the 1000 ImageNet classes
A__ = logits.argmax(-1 ).item()
self.assertEqual(model.config.id2label[predicted_class_idx] , 'tabby, tabby cat' )
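# A standalone inference sketch mirroring the integration test above
# (checkpoint name taken from the test; the image path is a placeholder):
#
#   from PIL import Image
#   from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor
#   processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
#   model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384")
#   inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#   predicted = model(**inputs).logits.argmax(-1).item()
#   print(model.config.id2label[predicted])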
| 52
| 1
|
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def A ( __UpperCamelCase ) -> bool:
A__ = int(number**0.5 )
return number == sq * sq
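# e.g. is_sq(36) computes int(36 ** 0.5) == 6 and checks 6 * 6 == 36.
# Note this relies on the float square root being exact, which holds for
# the modest intermediate values used below; math.isqrt would be the
# robust choice for arbitrarily large inputs.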
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> tuple[int, int]:
A__ = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
A__ = x_den * y_den * z_den
A__ = gcd(__UpperCamelCase , __UpperCamelCase )
top //= hcf
bottom //= hcf
return top, bottom
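# Quick sanity check of the reduction: for 1/2 + 1/3 + 1/6 the raw
# numerator is 1*3*6 + 1*2*6 + 1*2*3 = 36 and the raw denominator is
# 2*3*6 = 36, so dividing both by gcd(36, 36) = 36 yields (1, 1), i.e.
# the sum is exactly 1.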
def A ( __UpperCamelCase = 35 ) -> int:
A__ = set()
A__ = 42
A__ = Fraction(0 )
A__ = 42
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
A__ = x_num * y_den + x_den * y_num
A__ = x_den * y_den
A__ = gcd(__UpperCamelCase , __UpperCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
A__ = add_three(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
unique_s.add(__UpperCamelCase )
# n=2
A__ = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
A__ = x_den * x_den * y_den * y_den
if is_sq(__UpperCamelCase ) and is_sq(__UpperCamelCase ):
A__ = int(sqrt(__UpperCamelCase ) )
A__ = int(sqrt(__UpperCamelCase ) )
A__ = gcd(__UpperCamelCase , __UpperCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
A__ = add_three(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
unique_s.add(__UpperCamelCase )
# n=-1
A__ = x_num * y_num
A__ = x_den * y_num + x_num * y_den
A__ = gcd(__UpperCamelCase , __UpperCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
A__ = add_three(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
unique_s.add(__UpperCamelCase )
# n=-2
A__ = x_num * x_num * y_num * y_num
A__ = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(__UpperCamelCase ) and is_sq(__UpperCamelCase ):
A__ = int(sqrt(__UpperCamelCase ) )
A__ = int(sqrt(__UpperCamelCase ) )
A__ = gcd(__UpperCamelCase , __UpperCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
A__ = add_three(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
unique_s.add(__UpperCamelCase )
for num, den in unique_s:
total += Fraction(__UpperCamelCase , __UpperCamelCase )
return total.denominator + total.numerator
if __name__ == "__main__":
print(f'{solution() = }')
| 52
|
def A ( __UpperCamelCase ) -> bool:
return number & 1 == 0
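# The bitwise test reads the least significant bit: 10 is 0b1010, so
# 10 & 1 == 0 and the number is even; 7 is 0b111, so 7 & 1 == 1 and the
# number is odd. This is equivalent to `number % 2 == 0` without the
# division.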
if __name__ == "__main__":
import doctest
doctest.testmod()
| 52
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ = {'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
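# A minimal sketch of what the lazy structure above enables (assumes a
# working torch install; the checkpoint name is illustrative):
#
#   from transformers import WavLMModel  # resolved lazily on first access
#   model = WavLMModel.from_pretrained("microsoft/wavlm-base")
#
# Until that first attribute access, neither torch nor the modeling file
# is imported, which keeps a bare `import transformers` cheap.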
| 52
|
from typing import Dict
from .base import GenericTensor, Pipeline
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def _a ( self : Any , _snake_case : str=None , _snake_case : Dict=None , _snake_case : Any=None , **_snake_case : str ):
"""simple docstring"""
if tokenize_kwargs is None:
A__ = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' )
A__ = truncation
A__ = tokenize_kwargs
A__ = {}
if return_tensors is not None:
A__ = return_tensors
return preprocess_params, {}, postprocess_params
def _a ( self : Any , _snake_case : Dict , **_snake_case : Optional[Any] ):
"""simple docstring"""
A__ = self.framework
A__ = self.tokenizer(_snake_case , return_tensors=_snake_case , **_snake_case )
return model_inputs
def _a ( self : List[Any] , _snake_case : Dict ):
"""simple docstring"""
A__ = self.model(**_snake_case )
return model_outputs
def _a ( self : Optional[Any] , _snake_case : List[Any] , _snake_case : str=False ):
"""simple docstring"""
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self : Dict , *_snake_case : int , **_snake_case : List[str] ):
"""simple docstring"""
return super().__call__(*_snake_case , **_snake_case )
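# A minimal usage sketch of the pipeline above (the checkpoint name is
# illustrative; any encoder checkpoint on the Hub works):
#
#   from transformers import pipeline
#   extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#   features = extractor("This is a test")
#   # nested Python lists of shape [1, sequence_length, hidden_size]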
| 52
| 1
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : List[Any] = RobertaTokenizer
A__ : Optional[int] = RobertaTokenizerFast
A__ : Union[str, Any] = True
A__ : Union[str, Any] = {"cls_token": "<s>"}
def _a ( self : str ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A__ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
A__ = dict(zip(_snake_case , range(len(_snake_case ) ) ) )
A__ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
A__ = {'unk_token': '<unk>'}
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_snake_case ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_snake_case ) )
def _a ( self : Optional[Any] , **_snake_case : str ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_snake_case )
def _a ( self : Union[str, Any] , **_snake_case : Dict ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **_snake_case )
def _a ( self : Optional[Any] , _snake_case : Optional[int] ):
"""simple docstring"""
A__ = 'lower newer'
A__ = 'lower newer'
return input_text, output_text
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
A__ = 'lower newer'
A__ = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
A__ = tokenizer.tokenize(_snake_case ) # , add_prefix_space=True)
self.assertListEqual(_snake_case , _snake_case )
A__ = tokens + [tokenizer.unk_token]
A__ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , _snake_case )
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=_snake_case ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=_snake_case ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def _a ( self : Any ):
"""simple docstring"""
A__ = self.tokenizer_class.from_pretrained('roberta-base' )
A__ = tokenizer.encode('sequence builders' , add_special_tokens=_snake_case )
A__ = tokenizer.encode('multi-sequence build' , add_special_tokens=_snake_case )
A__ = tokenizer.encode(
'sequence builders' , add_special_tokens=_snake_case , add_prefix_space=_snake_case )
A__ = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=_snake_case , add_prefix_space=_snake_case )
A__ = tokenizer.build_inputs_with_special_tokens(_snake_case )
A__ = tokenizer.build_inputs_with_special_tokens(_snake_case , _snake_case )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = 'Encode this sequence.'
A__ = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
A__ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case , add_prefix_space=_snake_case )
A__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(_snake_case , _snake_case )
A__ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case , add_prefix_space=_snake_case )
A__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(_snake_case , _snake_case )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
A__ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
A__ = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(_snake_case , _snake_case )
# Testing spaces after special tokens
A__ = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case )} ) # mask token has a left space
A__ = tokenizer.convert_tokens_to_ids(_snake_case )
A__ = 'Encode <mask> sequence'
A__ = 'Encode <mask>sequence'
A__ = tokenizer.encode(_snake_case )
A__ = encoded.index(_snake_case )
A__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(_snake_case , _snake_case )
A__ = tokenizer.encode(_snake_case )
A__ = encoded.index(_snake_case )
A__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(_snake_case , _snake_case )
def _a ( self : Tuple ):
"""simple docstring"""
pass
def _a ( self : int ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A__ = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case )
A__ = self.tokenizer_class.from_pretrained(_snake_case , **_snake_case )
A__ = 'A, <mask> AllenNLP sentence.'
A__ = tokenizer_r.encode_plus(_snake_case , add_special_tokens=_snake_case , return_token_type_ids=_snake_case )
A__ = tokenizer_p.encode_plus(_snake_case , add_special_tokens=_snake_case , return_token_type_ids=_snake_case )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so its mean over the length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
A__ = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
A__ = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
_snake_case , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
_snake_case , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
A__ = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
A__ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , _snake_case )
self.assertEqual(post_processor_state['add_prefix_space'] , _snake_case )
self.assertEqual(post_processor_state['trim_offsets'] , _snake_case )
def _a ( self : Tuple ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A__ = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
A__ = F'''{text_of_1_token} {text_of_1_token}'''
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_snake_case ) + 1, len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_snake_case ) + 1, len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_snake_case ), len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_snake_case ), len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_snake_case ) + 1, 1 + len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_snake_case ), 1 + len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_snake_case ), 1 + len(_snake_case ) + 1 + len(_snake_case )) , )
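# In short, the configurations exercised above differ on two axes:
# `add_prefix_space` controls whether a leading "Ġ" space is injected
# before tokenizing, and `trim_offsets` controls whether that injected
# space is excluded from the reported character offsets, which is why
# offset_mapping[0] starts at (0, ...) in some variants and (1, ...) in
# others.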
| 52
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
def A ( __UpperCamelCase , __UpperCamelCase ) -> List[Any]:
return (preds == labels).mean()
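# e.g. simple_accuracy(np.array([1, 0, 1]), np.array([1, 1, 1])) is the
# boolean mean 2/3, the fraction of positions where prediction and label
# agree.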
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
A__ : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
A__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
A__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
A__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
A__ : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
A__ : str = field(metadata={"help": "Should contain the data files for the task."} )
A__ : int = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
A__ : bool = field(
default=UpperCAmelCase_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def A ( ) -> Any:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
A__ , A__ , A__ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , __UpperCamelCase )
# Set seed
set_seed(training_args.seed )
try:
A__ = processors[data_args.task_name]()
A__ = processor.get_labels()
A__ = len(__UpperCamelCase )
except KeyError:
raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__UpperCamelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
A__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
A__ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__UpperCamelCase , cache_dir=model_args.cache_dir , )
# Get datasets
A__ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=__UpperCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
A__ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=__UpperCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(__UpperCamelCase ) -> Dict:
A__ = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(__UpperCamelCase , p.label_ids )}
# Data collator
A__ = DataCollatorWithPadding(__UpperCamelCase , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
A__ = Trainer(
model=__UpperCamelCase , args=__UpperCamelCase , train_dataset=__UpperCamelCase , eval_dataset=__UpperCamelCase , compute_metrics=__UpperCamelCase , data_collator=__UpperCamelCase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
A__ = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
A__ = trainer.evaluate()
A__ = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_master():
with open(__UpperCamelCase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , __UpperCamelCase , __UpperCamelCase )
writer.write('%s = %s\n' % (key, value) )
results.update(__UpperCamelCase )
return results
def A ( __UpperCamelCase ) -> List[Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 52
| 1
|
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_snake_case , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(_snake_case , 'num_attention_heads' ) )
self.parent.assertTrue(hasattr(_snake_case , 'num_encoder_blocks' ) )
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] , _snake_case : Union[str, Any] , _snake_case : Union[str, Any]=13 , _snake_case : Union[str, Any]=64 , _snake_case : int=3 , _snake_case : Dict=4 , _snake_case : Dict=[2, 2, 2, 2] , _snake_case : Dict=[8, 4, 2, 1] , _snake_case : Any=[16, 32, 64, 1_28] , _snake_case : int=[1, 4, 8, 16] , _snake_case : Any=[1, 2, 4, 8] , _snake_case : Union[str, Any]=True , _snake_case : List[Any]=True , _snake_case : Any="gelu" , _snake_case : str=0.1 , _snake_case : Any=0.1 , _snake_case : Optional[int]=0.02 , _snake_case : str=3 , _snake_case : str=None , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = image_size
A__ = num_channels
A__ = num_encoder_blocks
A__ = sr_ratios
A__ = depths
A__ = hidden_sizes
A__ = downsampling_rates
A__ = num_attention_heads
A__ = is_training
A__ = use_labels
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = initializer_range
A__ = num_labels
A__ = scope
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
A__ = self.get_config()
return config, pixel_values, labels
def _a ( self : List[Any] ):
"""simple docstring"""
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def _a ( self : Union[str, Any] , _snake_case : List[Any] , _snake_case : Any , _snake_case : Union[str, Any] ):
"""simple docstring"""
A__ = SegformerModel(config=_snake_case )
model.to(_snake_case )
model.eval()
A__ = model(_snake_case )
A__ = A__ = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def _a ( self : int , _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : Optional[int] ):
"""simple docstring"""
A__ = self.num_labels
A__ = SegformerForSemanticSegmentation(_snake_case )
model.to(_snake_case )
model.eval()
A__ = model(_snake_case )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
A__ = model(_snake_case , labels=_snake_case )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def _a ( self : int , _snake_case : Union[str, Any] , _snake_case : Tuple , _snake_case : Any ):
"""simple docstring"""
A__ = 1
A__ = SegformerForSemanticSegmentation(config=_snake_case )
model.to(_snake_case )
model.eval()
A__ = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(_snake_case )
A__ = model(_snake_case , labels=_snake_case )
self.parent.assertGreater(result.loss , 0.0 )
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Dict = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
A__ : str = (
{
"feature-extraction": SegformerModel,
"image-classification": SegformerForImageClassification,
"image-segmentation": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
A__ : Any = True
A__ : List[Any] = False
A__ : Any = False
A__ : Union[str, Any] = False
def _a ( self : str ):
"""simple docstring"""
A__ = SegformerModelTester(self )
A__ = SegformerConfigTester(self , config_class=_snake_case )
def _a ( self : int ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def _a ( self : Dict ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*_snake_case )
def _a ( self : int ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*_snake_case )
@unittest.skip('SegFormer does not use inputs_embeds' )
def _a ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods' )
def _a ( self : Dict ):
"""simple docstring"""
pass
def _a ( self : str ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(_snake_case )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _snake_case )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
for model_class in self.all_model_classes:
A__ = True
A__ = False
A__ = True
A__ = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(_snake_case , _snake_case ) )
A__ = outputs.attentions
A__ = sum(self.model_tester.depths )
self.assertEqual(len(_snake_case ) , _snake_case )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ = True
A__ = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(_snake_case , _snake_case ) )
A__ = outputs.attentions
self.assertEqual(len(_snake_case ) , _snake_case )
# verify the first attentions (first block, first layer)
A__ = (self.model_tester.image_size // 4) ** 2
A__ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
A__ = (self.model_tester.image_size // 32) ** 2
A__ = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
A__ = len(_snake_case )
# Check attention is always last and order is fine
A__ = True
A__ = True
A__ = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(_snake_case , _snake_case ) )
self.assertEqual(out_len + 1 , len(_snake_case ) )
A__ = outputs.attentions
self.assertEqual(len(_snake_case ) , _snake_case )
# verify the first attentions (first block, first layer)
A__ = (self.model_tester.image_size // 4) ** 2
A__ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
def check_hidden_states_output(_snake_case : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : str ):
A__ = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(_snake_case , _snake_case ) )
A__ = outputs.hidden_states
A__ = self.model_tester.num_encoder_blocks
self.assertEqual(len(_snake_case ) , _snake_case )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
def _a ( self : Tuple ):
"""simple docstring"""
if not self.model_tester.is_training:
return
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
for model_class in self.all_model_classes:
if model_class in get_values(_snake_case ):
continue
A__ = model_class(_snake_case )
model.to(_snake_case )
model.train()
A__ = self._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case )
A__ = model(**_snake_case ).loss
loss.backward()
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _a ( self : Optional[Any] ):
"""simple docstring"""
pass
@slow
def _a ( self : Dict ):
"""simple docstring"""
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = SegformerModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def A ( ) -> Optional[Any]:
A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self : Dict ):
"""simple docstring"""
A__ = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=_snake_case , align=_snake_case , do_random_crop=_snake_case )
A__ = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
_snake_case )
A__ = prepare_img()
A__ = image_processor(images=_snake_case , return_tensors='pt' )
A__ = encoded_inputs.pixel_values.to(_snake_case )
with torch.no_grad():
A__ = model(_snake_case )
A__ = torch.Size((1, model.config.num_labels, 1_28, 1_28) )
self.assertEqual(outputs.logits.shape , _snake_case )
A__ = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _snake_case , atol=1E-4 ) )
@slow
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=_snake_case , align=_snake_case , do_random_crop=_snake_case )
A__ = SegformerForSemanticSegmentation.from_pretrained(
'nvidia/segformer-b1-finetuned-cityscapes-1024-1024' ).to(_snake_case )
A__ = prepare_img()
A__ = image_processor(images=_snake_case , return_tensors='pt' )
A__ = encoded_inputs.pixel_values.to(_snake_case )
with torch.no_grad():
A__ = model(_snake_case )
A__ = torch.Size((1, model.config.num_labels, 1_28, 1_28) )
self.assertEqual(outputs.logits.shape , _snake_case )
A__ = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _snake_case , atol=1E-1 ) )
@slow
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=_snake_case , align=_snake_case , do_random_crop=_snake_case )
A__ = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
_snake_case )
A__ = prepare_img()
A__ = image_processor(images=_snake_case , return_tensors='pt' )
A__ = encoded_inputs.pixel_values.to(_snake_case )
with torch.no_grad():
A__ = model(_snake_case )
A__ = outputs.logits.detach().cpu()
A__ = image_processor.post_process_semantic_segmentation(outputs=_snake_case , target_sizes=[(5_00, 3_00)] )
A__ = torch.Size((5_00, 3_00) )
self.assertEqual(segmentation[0].shape , _snake_case )
A__ = image_processor.post_process_semantic_segmentation(outputs=_snake_case )
A__ = torch.Size((1_28, 1_28) )
self.assertEqual(segmentation[0].shape , _snake_case )
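# Note on the post-processing checked above: passing target_sizes=[(h, w)]
# makes post_process_semantic_segmentation upsample the
# (1, num_labels, 128, 128) logits to one (h, w) label map per image,
# while omitting target_sizes keeps the raw 128x128 resolution.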
| 52
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 52
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = '''▁'''
SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''sentencepiece.bpe.model'''}
SCREAMING_SNAKE_CASE__ = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'''
),
}
}
SCREAMING_SNAKE_CASE__ = {
'''facebook/nllb-200-distilled-600M''': 1_0_2_4,
}
# fmt: off
SCREAMING_SNAKE_CASE__ = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : Optional[Any] = VOCAB_FILES_NAMES
A__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
A__ : Optional[Any] = ["input_ids", "attention_mask"]
A__ : List[int] = []
A__ : List[int] = []
def __init__( self : Tuple , _snake_case : str , _snake_case : List[str]="<s>" , _snake_case : int="</s>" , _snake_case : Optional[Any]="</s>" , _snake_case : Union[str, Any]="<s>" , _snake_case : Any="<unk>" , _snake_case : str="<pad>" , _snake_case : Optional[Any]="<mask>" , _snake_case : Tuple=None , _snake_case : Optional[int]=None , _snake_case : Optional[Any]=None , _snake_case : Optional[Dict[str, Any]] = None , _snake_case : List[str]=None , _snake_case : Optional[int]=False , **_snake_case : List[str] , ):
"""simple docstring"""
A__ = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else mask_token
A__ = {} if sp_model_kwargs is None else sp_model_kwargs
A__ = legacy_behaviour
super().__init__(
bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , cls_token=_snake_case , pad_token=_snake_case , mask_token=_snake_case , tokenizer_file=_snake_case , src_lang=_snake_case , tgt_lang=_snake_case , additional_special_tokens=_snake_case , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=_snake_case , **_snake_case , )
A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_snake_case ) )
A__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 tokens
A__ = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
A__ = 1
A__ = len(self.sp_model )
A__ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_snake_case )
}
A__ = {v: k for k, v in self.lang_code_to_id.items()}
A__ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ):
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source language setting."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target language setting."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
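# Hedged usage sketch (added for illustration, not part of the original file): the
# documented translation flow for a tokenizer like the one above. The checkpoint
# name and language codes are assumptions.
#
# from transformers import NllbTokenizer
#
# tok = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
# enc = tok("Hello world", return_tensors="pt")
# forced_bos = tok.convert_tokens_to_ids("fra_Latn")  # pass as `forced_bos_token_id` to generate()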
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    webbrowser.open(link)
def get_set_bits_count(number: int) -> int:
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # This way we arrive at the next set bit (next 1) instead of looping
        # through each bit and checking for 1s; hence the loop won't run 32
        # times, it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
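# Hedged demo (added for illustration): Brian Kernighan's trick clears the lowest
# set bit on every iteration, so the loop runs once per 1-bit.
assert get_set_bits_count(0b1011) == 3
assert get_set_bits_count(25) == 3  # 25 == 0b11001
assert get_set_bits_count(0) == 0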
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
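# Hedged demo (added for illustration): spot-check the 6k +/- 1 primality test.
# `solution(0.1)` returns the first spiral side length at which fewer than 10% of
# the diagonal values are prime (Project Euler 58), but it takes a while to run.
assert is_prime(5) and is_prime(7) and not is_prime(9)
assert not is_prime(0) and not is_prime(1)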
import inspect

import jax
import jax.lax as lax
import jax.numpy as jnp

from ..utils import add_start_docstrings
from ..utils.logging import get_logger


logger = get_logger(__name__)


LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary token when not
            using beam search, or log softmax for each vocabulary token when using beam search.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional logits processor specific kwargs.

    Return:
        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""
class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers that can be applied during generation with sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsProcessorList(list):
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores


class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")
        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores


class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")
        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])
        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p
        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)
        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)
        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]
        return next_scores


class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")
        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)
        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift
        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores


class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))
        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)
        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))
        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)
        return scores


class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")
        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)
        return scores


class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len: int):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)
        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))
        return scores


class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]
            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores


class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1
        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|>, which is handled by `without_timestamps`
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )
            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )
            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )
        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
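# Hedged usage sketch (added for illustration, not part of the original file):
# composing the processors above the way generation loops typically do. The vocab
# size, token ids, and scores here are made-up values.
if __name__ == "__main__":
    processors = FlaxLogitsProcessorList(
        [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=5)]
    )
    dummy_input_ids = jnp.zeros((2, 4), dtype=jnp.int32)
    dummy_scores = jnp.arange(2 * 10, dtype=jnp.float32).reshape(2, 10)
    warped = processors(dummy_input_ids, dummy_scores, cur_len=4)
    print(warped.shape)  # (2, 10); all but the top-5 logits per row are now -inf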
def and_gate(input_a: int, input_b: int) -> int:
    return int((input_a, input_b).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
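# Hedged extension (added for illustration): other gates compose from AND in the
# usual way; e.g. a NAND gate is just the negation of and_gate.
def nand_gate(input_a: int, input_b: int) -> int:
    return int(not and_gate(input_a, input_b))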
import argparse
import struct
import unittest


class SHA256:
    def __init__(self, data: bytes):
        self.data = data
        # Initialize hash values
        self.hashes = [
            0x6A09E667,
            0xBB67AE85,
            0x3C6EF372,
            0xA54FF53A,
            0x510E527F,
            0x9B05688C,
            0x1F83D9AB,
            0x5BE0CD19,
        ]
        # Initialize round constants
        self.round_constants = [
            0x428A2F98,
            0x71374491,
            0xB5C0FBCF,
            0xE9B5DBA5,
            0x3956C25B,
            0x59F111F1,
            0x923F82A4,
            0xAB1C5ED5,
            0xD807AA98,
            0x12835B01,
            0x243185BE,
            0x550C7DC3,
            0x72BE5D74,
            0x80DEB1FE,
            0x9BDC06A7,
            0xC19BF174,
            0xE49B69C1,
            0xEFBE4786,
            0x0FC19DC6,
            0x240CA1CC,
            0x2DE92C6F,
            0x4A7484AA,
            0x5CB0A9DC,
            0x76F988DA,
            0x983E5152,
            0xA831C66D,
            0xB00327C8,
            0xBF597FC7,
            0xC6E00BF3,
            0xD5A79147,
            0x06CA6351,
            0x14292967,
            0x27B70A85,
            0x2E1B2138,
            0x4D2C6DFC,
            0x53380D13,
            0x650A7354,
            0x766A0ABB,
            0x81C2C92E,
            0x92722C85,
            0xA2BFE8A1,
            0xA81A664B,
            0xC24B8B70,
            0xC76C51A3,
            0xD192E819,
            0xD6990624,
            0xF40E3585,
            0x106AA070,
            0x19A4C116,
            0x1E376C08,
            0x2748774C,
            0x34B0BCB5,
            0x391C0CB3,
            0x4ED8AA4A,
            0x5B9CCA4F,
            0x682E6FF3,
            0x748F82EE,
            0x78A5636F,
            0x84C87814,
            0x8CC70208,
            0x90BEFFFA,
            0xA4506CEB,
            0xBEF9A3F7,
            0xC67178F2,
        ]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4-byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit integer by the given number of bit positions."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)


class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file"
    )
    args = parser.parse_args()
    hash_input = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input, "utf-8")
    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
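# Hedged sanity check (added for illustration): SHA-256 of b"abc" is the canonical
# FIPS 180 test vector, so the class above can be spot-checked without hashlib.
assert (
    SHA256(b"abc").hash
    == "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"
)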
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor: logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
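# Hedged usage sketch (added for illustration, not part of the original file): this
# pipeline is normally reached through `transformers.pipeline`; the checkpoint name
# below is an assumption.
#
# from transformers import pipeline
#
# extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
# features = extractor("Transformers is great!")
# print(len(features[0]), len(features[0][0]))  # num_tokens x hidden_size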
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`, or its derivative when `deriv` is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
import os


def solution() -> int:
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp

    return maximum


if __name__ == "__main__":
    print(solution())
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q, S, T=1):
        return self.softmax(T * self.cos(q, S))

    def forward(self, W_query, W_supports):
        """Find the scores of each token being the start and the end token for an entity."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
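# Hedged usage sketch (added for illustration, not part of the original file): the
# same classes through the public package namespace; the "default" task value is an
# assumption.
#
# from transformers import RobertaConfig
# from transformers.models.roberta.configuration_roberta import RobertaOnnxConfig
#
# cfg = RobertaConfig(num_hidden_layers=6)
# print(cfg.model_type, cfg.hidden_size)  # "roberta" 768
# print(dict(RobertaOnnxConfig(cfg, task="default").inputs))  # dynamic batch/sequence axes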