| code (string, 81–54k chars) | code_codestyle (int64, 0–721) | style_context (string, 91–41.9k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
def __init__( self : Union[str, Any] , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : List[str] ):
'''simple docstring'''
warnings.warn(
'''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use CLIPImageProcessor instead.''' , UpperCAmelCase__ , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
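# A de-mangled sketch of the shim above, assuming the original Transformers
# source: the mangled class subclasses CLIPImageProcessor and only emits a
# deprecation warning before deferring to it. Names below are reconstructed.
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of"
            " Transformers. Please use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)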
| 88 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
def __init__( self : Union[str, Any] , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : List[str] ):
'''simple docstring'''
warnings.warn(
'''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use CLIPImageProcessor instead.''' , UpperCAmelCase__ , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
| 88 | 1 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = IFInpaintingSuperResolutionPipeline
lowerCamelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
lowerCamelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} )
lowerCamelCase_ = PipelineTesterMixin.required_optional_params - {'latents'}
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return self._get_superresolution_dummy_components()
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple=0 ):
'''simple docstring'''
if str(UpperCAmelCase__ ).startswith('''mps''' ):
lowercase : Tuple =torch.manual_seed(UpperCAmelCase__ )
else:
lowercase : str =torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
lowercase : str =floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ )
lowercase : List[Any] =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ )
lowercase : Optional[Any] =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ )
lowercase : str ={
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_float16(expected_max_diff=1E-1 )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
self._test_save_load_local()
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 88 |
'''simple docstring'''
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
UpperCamelCase_ = parser.parse_args()
if args.model_type == "roberta":
UpperCamelCase_ = RobertaForMaskedLM.from_pretrained(args.model_name)
UpperCamelCase_ = """roberta"""
elif args.model_type == "gpt2":
UpperCamelCase_ = GPT2LMHeadModel.from_pretrained(args.model_name)
UpperCamelCase_ = """transformer"""
UpperCamelCase_ = model.state_dict()
UpperCamelCase_ = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
UpperCamelCase_ = state_dict[f'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
UpperCamelCase_ = f'''{prefix}.embeddings.{w}.weight'''
UpperCamelCase_ = state_dict[param_name]
for w in ["weight", "bias"]:
UpperCamelCase_ = f'''{prefix}.embeddings.LayerNorm.{w}'''
UpperCamelCase_ = state_dict[param_name]
# Transformer Blocks #
UpperCamelCase_ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[
f'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
]
UpperCamelCase_ = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias''']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
]
std_idx += 1
# Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
UpperCamelCase_ = state_dict[f'''{layer}''']
if args.vocab_transform:
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[f'''lm_head.dense.{w}''']
UpperCamelCase_ = state_dict[f'''lm_head.layer_norm.{w}''']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[f'''{prefix}.ln_f.{w}''']
UpperCamelCase_ = state_dict["""lm_head.weight"""]
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
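# The assignments above were mangled so that every target collapsed to
# UpperCamelCase_; in the original extraction script each one populates a
# compressed state dict keyed by the student's parameter names. A minimal
# sketch of that pattern for the RoBERTa branch (names reconstructed, layer
# list abridged):
compressed_sd = {}
std_idx = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
    for layer in ["attention.self.query", "attention.output.dense"]:  # abridged
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
            ]
    std_idx += 1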
| 88 | 1 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class __SCREAMING_SNAKE_CASE :
def __init__( self : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str=13 , UpperCAmelCase__ : Tuple=7 , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : int=99 , UpperCAmelCase__ : Dict=32 , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : Tuple=4 , UpperCAmelCase__ : Optional[int]=37 , UpperCAmelCase__ : Optional[int]="gelu" , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : List[str]=512 , UpperCAmelCase__ : Tuple=16 , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : Dict=0.02 , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : List[Any]=4 , UpperCAmelCase__ : Union[str, Any]=None , ):
'''simple docstring'''
lowercase : Any =parent
lowercase : Tuple =13
lowercase : List[Any] =7
lowercase : Any =True
lowercase : Optional[int] =True
lowercase : Optional[Any] =True
lowercase : Optional[Any] =True
lowercase : int =99
lowercase : Dict =384
lowercase : List[str] =2
lowercase : Any =4
lowercase : int =37
lowercase : int ='''gelu'''
lowercase : List[Any] =0.1
lowercase : Any =0.1
lowercase : Dict =512
lowercase : List[Any] =16
lowercase : Optional[int] =2
lowercase : Optional[Any] =0.02
lowercase : Dict =3
lowercase : List[str] =4
lowercase : int =128
lowercase : List[str] =2
lowercase : int =9
lowercase : Tuple =1
lowercase : str =None
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Union[str, Any] =None
if self.use_input_mask:
lowercase : Dict =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : List[str] =None
if self.use_token_type_ids:
lowercase : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : Optional[Any] =None
lowercase : Any =None
lowercase : Union[str, Any] =None
if self.use_labels:
lowercase : Dict =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : List[Any] =ids_tensor([self.batch_size] , self.num_choices )
lowercase : Dict =ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase__ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Any ):
'''simple docstring'''
lowercase : Tuple =TFConvBertModel(config=UpperCAmelCase__ )
lowercase : List[Any] ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase : Dict =[input_ids, input_mask]
lowercase : Tuple =model(UpperCAmelCase__ )
lowercase : Dict =model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
lowercase : Dict =TFConvBertForMaskedLM(config=UpperCAmelCase__ )
lowercase : str ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : int =model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : Any ):
'''simple docstring'''
lowercase : str =self.num_labels
lowercase : Optional[Any] =TFConvBertForSequenceClassification(config=UpperCAmelCase__ )
lowercase : Optional[int] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Optional[int] =model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str ):
'''simple docstring'''
lowercase : Optional[Any] =self.num_choices
lowercase : List[Any] =TFConvBertForMultipleChoice(config=UpperCAmelCase__ )
lowercase : Optional[int] =tf.tile(tf.expand_dims(UpperCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] =tf.tile(tf.expand_dims(UpperCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
lowercase : Optional[Any] =tf.tile(tf.expand_dims(UpperCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
lowercase : str ={
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowercase : List[str] =model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict ):
'''simple docstring'''
lowercase : Optional[int] =self.num_labels
lowercase : Optional[Any] =TFConvBertForTokenClassification(config=UpperCAmelCase__ )
lowercase : Optional[Any] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Optional[Any] =model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int ):
'''simple docstring'''
lowercase : int =TFConvBertForQuestionAnswering(config=UpperCAmelCase__ )
lowercase : Any ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Any =model(UpperCAmelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : Any =self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase = config_and_inputs
lowercase : Dict ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCamelCase_ = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : Tuple =TFConvBertModelTester(self )
lowercase : Any =ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__ )
@slow
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase , lowercase : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
lowercase : List[str] =True
lowercase : str =True
if hasattr(UpperCAmelCase__ , '''use_cache''' ):
lowercase : List[Any] =True
lowercase : Tuple =getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length )
lowercase : Dict =getattr(self.model_tester , '''key_length''' , UpperCAmelCase__ )
for model_class in self.all_model_classes:
lowercase : Tuple =self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : List[str] =model_class(UpperCAmelCase__ )
lowercase : int =len(model(UpperCAmelCase__ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase__ , saved_model=UpperCAmelCase__ )
lowercase : Optional[int] =os.path.join(UpperCAmelCase__ , '''saved_model''' , '''1''' )
lowercase : Any =tf.keras.models.load_model(UpperCAmelCase__ )
lowercase : int =model(UpperCAmelCase__ )
if self.is_encoder_decoder:
lowercase : List[Any] =outputs['''encoder_hidden_states''']
lowercase : Optional[Any] =outputs['''encoder_attentions''']
else:
lowercase : Dict =outputs['''hidden_states''']
lowercase : Union[str, Any] =outputs['''attentions''']
self.assertEqual(len(UpperCAmelCase__ ) , UpperCAmelCase__ )
lowercase : Optional[int] =getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Union[str, Any] =TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
self.assertIsNotNone(UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase , lowercase : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
lowercase : Dict =True
lowercase : Optional[int] =getattr(self.model_tester , '''decoder_seq_length''' , self.model_tester.seq_length )
lowercase : Optional[Any] =getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length )
lowercase : Dict =getattr(self.model_tester , '''key_length''' , UpperCAmelCase__ )
lowercase : List[Any] =getattr(self.model_tester , '''key_length''' , UpperCAmelCase__ )
def check_decoder_attentions_output(UpperCAmelCase__ : Union[str, Any] ):
lowercase : str =len(UpperCAmelCase__ )
self.assertEqual(out_len % 2 , 0 )
lowercase : Tuple =outputs.decoder_attentions
self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(UpperCAmelCase__ : int ):
lowercase : Any =[
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
lowercase : int =True
lowercase : str =False
lowercase : int =model_class(UpperCAmelCase__ )
lowercase : Dict =model(self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
lowercase : Tuple =len(UpperCAmelCase__ )
self.assertEqual(config.output_hidden_states , UpperCAmelCase__ )
check_encoder_attentions_output(UpperCAmelCase__ )
if self.is_encoder_decoder:
lowercase : Tuple =model_class(UpperCAmelCase__ )
lowercase : Union[str, Any] =model(self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase__ )
check_decoder_attentions_output(UpperCAmelCase__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
lowercase : Optional[int] =True
lowercase : Optional[Any] =model_class(UpperCAmelCase__ )
lowercase : List[Any] =model(self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase__ )
check_encoder_attentions_output(UpperCAmelCase__ )
# Check attention is always last and order is fine
lowercase : Any =True
lowercase : Dict =True
lowercase : Optional[Any] =model_class(UpperCAmelCase__ )
lowercase : Any =model(self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCAmelCase__ ) )
self.assertEqual(model.config.output_hidden_states , UpperCAmelCase__ )
check_encoder_attentions_output(UpperCAmelCase__ )
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : List[str] =TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
lowercase : Optional[Any] =tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase : List[str] =model(UpperCAmelCase__ )[0]
lowercase : Any =[1, 6, 768]
self.assertEqual(output.shape , UpperCAmelCase__ )
lowercase : int =tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase__ , atol=1E-4 )
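# A hypothetical quick check mirroring the integration test above; the
# expected hidden-state shape (1, 6, 768) comes from the assertions in the test.
model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
outputs = model(tf.constant([[0, 1, 2, 3, 4, 5]]))
print(outputs.last_hidden_state.shape)  # (1, 6, 768)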
| 88 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def _lowerCAmelCase ( __magic_name__ : Dict ) -> Dict:
for param in module.parameters():
lowercase : List[str] =False
def _lowerCAmelCase ( ) -> List[str]:
lowercase : Dict ='''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowercase : Optional[int] ='''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def _lowerCAmelCase ( __magic_name__ : Union[str, Any] ) -> str:
lowercase : Optional[int] =plt.imshow(__magic_name__ )
fig.axes.get_xaxis().set_visible(__magic_name__ )
fig.axes.get_yaxis().set_visible(__magic_name__ )
plt.show()
def _lowerCAmelCase ( ) -> List[Any]:
lowercase : Any =datetime.now()
lowercase : Dict =current_time.strftime('''%H:%M:%S''' )
return timestamp
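# Hypothetical usage of the four mangled helpers above (all were renamed to
# _lowerCAmelCase); every name below is invented for illustration only:
freeze_params(model)                    # first helper: sets requires_grad=False on all parameters
device = get_device()                   # second helper: prefers CUDA, warns about MPS, falls back to CPU
show_image(generated_image)             # third helper: matplotlib display with both axes hidden
print(f"started at {get_timestamp()}")  # fourth helper: current time as an HH:MM:SS string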
| 88 | 1 |
'''simple docstring'''
from sklearn.metrics import f1_score
import datasets
UpperCamelCase_ = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""
UpperCamelCase_ = """
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights. Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{'f1': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results['f1'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results['f1'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")
>>> print(round(results['f1'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'f1': array([0.8, 0. , 0. ])}
"""
UpperCamelCase_ = """
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Dict=1 , UpperCAmelCase__ : Tuple="binary" , UpperCAmelCase__ : Any=None ):
'''simple docstring'''
lowercase : List[str] =f1_score(
UpperCAmelCase__ , UpperCAmelCase__ , labels=UpperCAmelCase__ , pos_label=UpperCAmelCase__ , average=UpperCAmelCase__ , sample_weight=UpperCAmelCase__ )
return {"f1": float(UpperCAmelCase__ ) if score.size == 1 else score}
| 88 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _lowerCAmelCase ( ) -> List[Any]:
lowercase : Tuple =HfArgumentParser(__magic_name__ )
lowercase : Union[str, Any] =parser.parse_args_into_dataclasses()[0]
lowercase : Any =TensorFlowBenchmark(args=__magic_name__ )
try:
lowercase : List[Any] =parser.parse_args_into_dataclasses()[0]
except ValueError as e:
lowercase : List[Any] ='''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
lowercase : Any =''' '''.join(str(__magic_name__ ).split(''' ''' )[:-1] )
lowercase : Optional[Any] =''''''
lowercase : List[str] =eval(str(__magic_name__ ).split(''' ''' )[-1] )
lowercase : Optional[Any] =[]
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(__magic_name__ )
if len(__magic_name__ ) > 0:
lowercase : int =full_error_msg + begin_error_msg + str(__magic_name__ )
raise ValueError(__magic_name__ )
benchmark.run()
if __name__ == "__main__":
main()
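# Hypothetical invocation of the benchmark entry point above; the flags shown
# are standard TensorFlowBenchmarkArguments fields, but treat them as an example:
#   python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128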
| 88 | 1 |
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
UpperCamelCase_ = """\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
"""
UpperCamelCase_ = """\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper \"Evaluating Large Language Models Trained on Code\"
(https://arxiv.org/abs/2107.03374).
"""
UpperCamelCase_ = """
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the candidate programs (Default: 4).
timeout: maximum time in seconds each candidate program is allowed to run (Default: 3.0).
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric(\"code_eval\")
>>> test_cases = [\"assert add(2,3)==5\"]
>>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{'pass@1': 0.5, 'pass@2': 1.0}
"""
UpperCamelCase_ = """
################################################################################
!!!WARNING!!!
################################################################################
The \"code_eval\" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper \"Evaluating Large
Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can do this
with:
>>> import os
>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"
################################################################################\
"""
UpperCamelCase_ = """The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the \"Software\"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/openai/human-eval''' , codebase_urls=['''https://github.com/openai/human-eval'''] , reference_urls=['''https://github.com/openai/human-eval'''] , license=_LICENSE , )
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any=[1, 10, 100] , UpperCAmelCase__ : Dict=4 , UpperCAmelCase__ : Optional[int]=3.0 ):
'''simple docstring'''
if os.getenv('''HF_ALLOW_CODE_EVAL''' , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError('''This metric is currently not supported on Windows.''' )
with ThreadPoolExecutor(max_workers=UpperCAmelCase__ ) as executor:
lowercase : Optional[int] =[]
lowercase : Optional[int] =Counter()
lowercase : Dict =0
lowercase : Any =defaultdict(UpperCAmelCase__ )
for task_id, (candidates, test_case) in enumerate(zip(UpperCAmelCase__ , UpperCAmelCase__ ) ):
for candidate in candidates:
lowercase : Tuple =candidate + '''\n''' + test_case
lowercase : Union[str, Any] =(test_program, timeout, task_id, completion_id[task_id])
lowercase : List[str] =executor.submit(UpperCAmelCase__ , *UpperCAmelCase__ )
futures.append(UpperCAmelCase__ )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(UpperCAmelCase__ ):
lowercase : List[str] =future.result()
results[result["task_id"]].append((result['''completion_id'''], result) )
lowercase , lowercase : Dict =[], []
for result in results.values():
result.sort()
lowercase : Any =[r[1]['''passed'''] for r in result]
total.append(len(UpperCAmelCase__ ) )
correct.append(sum(UpperCAmelCase__ ) )
lowercase : Union[str, Any] =np.array(UpperCAmelCase__ )
lowercase : Dict =np.array(UpperCAmelCase__ )
lowercase : Any =k
lowercase : Optional[Any] ={F'''pass@{k}''': estimate_pass_at_k(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : List[str] , __magic_name__ : List[Any] ) -> Optional[Any]:
def estimator(__magic_name__ : int , __magic_name__ : int , __magic_name__ : int ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(__magic_name__ , __magic_name__ ):
lowercase : Dict =itertools.repeat(__magic_name__ , len(__magic_name__ ) )
else:
assert len(__magic_name__ ) == len(__magic_name__ )
lowercase : Tuple =iter(__magic_name__ )
return np.array([estimator(int(__magic_name__ ) , int(__magic_name__ ) , __magic_name__ ) for n, c in zip(__magic_name__ , __magic_name__ )] )
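# A standalone check of the pass@k estimator above, which computes the
# unbiased estimate 1 - C(n-c, k) / C(n, k) from the Codex paper: with n=2
# candidates and c=1 passing, pass@1 is 0.5 and pass@2 is 1.0, matching the
# docstring example earlier in this snippet.
import numpy as np

def pass_at_k(n: int, c: int, k: int) -> float:
    if n - c < k:
        return 1.0
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

print(pass_at_k(2, 1, 1))  # 0.5
print(pass_at_k(2, 1, 2))  # 1.0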
| 88 |
'''simple docstring'''
from __future__ import annotations
def _lowerCAmelCase ( __magic_name__ : list[list[int]] ) -> bool:
lowercase : str =len(__magic_name__ )
# We need to create solution object to save path.
lowercase : int =[[0 for _ in range(__magic_name__ )] for _ in range(__magic_name__ )]
lowercase : List[Any] =run_maze(__magic_name__ , 0 , 0 , __magic_name__ )
if solved:
print('''\n'''.join(str(__magic_name__ ) for row in solutions ) )
else:
print('''No solution exists!''' )
return solved
def _lowerCAmelCase ( __magic_name__ : list[list[int]] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : list[list[int]] ) -> bool:
lowercase : Optional[int] =len(__magic_name__ )
# Final check point.
if i == j == (size - 1):
lowercase : Optional[int] =1
return True
lowercase : Optional[int] =(not i < 0) and (not j < 0) # Check lower bounds
lowercase : Tuple =(i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
lowercase : Union[str, Any] =(not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
lowercase : Union[str, Any] =1
# check for directions
if (
run_maze(__magic_name__ , i + 1 , __magic_name__ , __magic_name__ )
or run_maze(__magic_name__ , __magic_name__ , j + 1 , __magic_name__ )
or run_maze(__magic_name__ , i - 1 , __magic_name__ , __magic_name__ )
or run_maze(__magic_name__ , __magic_name__ , j - 1 , __magic_name__ )
):
return True
lowercase : str =0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
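# Hypothetical driver for the solver above (both functions were mangled to
# _lowerCAmelCase; solve_maze stands in for the first one). 0 = open, 1 = wall:
maze = [
    [0, 1, 0],
    [0, 0, 0],
    [1, 0, 0],
]
solve_maze(maze)  # prints the 0/1 solution grid, or "No solution exists!"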
| 88 | 1 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Optional[Any]=None ):
'''simple docstring'''
# Input as list
lowercase : Optional[int] =list(poly_a or [0] )[:]
lowercase : Optional[Any] =list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
lowercase : Any =len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
lowercase : Dict =len(self.polyB )
# Add 0 to make lengths equal a power of 2
lowercase : int =int(
2 ** np.ceil(np.log2(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
lowercase : Union[str, Any] =complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
lowercase : Tuple =self.__multiply()
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Tuple ):
'''simple docstring'''
lowercase : Union[str, Any] =[[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB]
# Corner case
if len(UpperCAmelCase__ ) <= 1:
return dft[0]
# Butterfly passes: repeatedly combine corresponding entries from the two halves
lowercase : Any =self.c_max_length // 2
while next_ncol > 0:
lowercase : Optional[int] =[[] for i in range(UpperCAmelCase__ )]
lowercase : Tuple =self.root**next_ncol
# First half of next step
lowercase : str =1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCAmelCase__ ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
lowercase : int =1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCAmelCase__ ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
lowercase : Dict =new_dft
lowercase : Tuple =next_ncol // 2
return dft[0]
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Any =self.__dft('''A''' )
lowercase : Any =self.__dft('''B''' )
lowercase : Optional[int] =[[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
lowercase : Optional[int] =2
while next_ncol <= self.c_max_length:
lowercase : Optional[int] =[[] for i in range(UpperCAmelCase__ )]
lowercase : List[str] =self.root ** (next_ncol // 2)
lowercase : Optional[int] =1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
lowercase : List[Any] =new_inverse_c
next_ncol *= 2
# Unpack
lowercase : Tuple =[round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Any ):
'''simple docstring'''
lowercase : Any ='''A = ''' + ''' + '''.join(
F'''{coef}*x^{i}''' for i, coef in enumerate(self.polyA[: self.len_A] ) )
lowercase : Tuple ='''B = ''' + ''' + '''.join(
F'''{coef}*x^{i}''' for i, coef in enumerate(self.polyB[: self.len_B] ) )
lowercase : List[str] ='''A*B = ''' + ''' + '''.join(
F'''{coef}*x^{i}''' for i, coef in enumerate(self.product ) )
return F'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
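# Hypothetical usage of the mangled multiplier class above (FFT stands in for
# __SCREAMING_SNAKE_CASE); coefficients are listed in ascending powers of x:
# (1 + 2x + 3x^2) * (4 + 5x) = 4 + 13x + 22x^2 + 15x^3
product = FFT(poly_a=[1, 2, 3], poly_b=[4, 5])
print(product)  # shows A, B and A*B as formatted polynomials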
| 88 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ):
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
lowercase : Any =DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )
@torch.no_grad()
def __call__( self : List[Any] , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : int = 50 , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , ):
'''simple docstring'''
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , UpperCAmelCase__ ):
lowercase : Optional[int] =(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
lowercase : Optional[int] =(batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and len(UpperCAmelCase__ ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(UpperCAmelCase__ )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowercase : str =randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(UpperCAmelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowercase : Dict =self.unet(UpperCAmelCase__ , UpperCAmelCase__ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
lowercase : Dict =self.scheduler.step(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , eta=UpperCAmelCase__ , use_clipped_model_output=UpperCAmelCase__ , generator=UpperCAmelCase__ ).prev_sample
lowercase : Optional[Any] =(image / 2 + 0.5).clamp(0 , 1 )
lowercase : Optional[Any] =image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase : List[str] =self.numpy_to_pil(UpperCAmelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCAmelCase__ )
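# Hypothetical usage, assuming the stock diffusers DDIMPipeline that this
# snippet mirrors (the model id shown is only an example checkpoint):
from diffusers import DDIMPipeline

pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
image.save("sample.png")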
| 88 | 1 |
'''simple docstring'''
UpperCamelCase_ = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
UpperCamelCase_ = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
UpperCamelCase_ = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
UpperCamelCase_ = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
UpperCamelCase_ = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
UpperCamelCase_ = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
UpperCamelCase_ = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
UpperCamelCase_ = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 88 |
'''simple docstring'''
import argparse
import copy
def _lowerCAmelCase ( __magic_name__ : List[str] ) -> Union[str, Any]:
lowercase : int ={}
with open(__magic_name__ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
lowercase : List[str] =[]
_list.append([line.split()[1], line.split()[2]] )
lowercase : Tuple =_list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
lowercase : List[Any] =[]
_list.append([line.split()[0], line.split()[2]] )
lowercase : Union[str, Any] =_list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def _lowerCAmelCase ( __magic_name__ : Optional[int] , __magic_name__ : List[Any] ) -> str:
with open(__magic_name__ ) as f:
lowercase : Optional[int] =f.read(1 )
lowercase : List[Any] =start_node
lowercase : List[Any] =[]
lowercase : str =start_node
lowercase : str =0
while visiting not in first_solution:
lowercase : Optional[int] =10000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(__magic_name__ ) and k[0] not in first_solution:
lowercase : List[Any] =k[1]
lowercase : str =k[0]
first_solution.append(__magic_name__ )
lowercase : Any =distance_of_first_solution + int(__magic_name__ )
lowercase : Optional[int] =best_node
first_solution.append(__magic_name__ )
lowercase : str =0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
lowercase : str =(
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 10000
)
return first_solution, distance_of_first_solution
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : Any ) -> Tuple:
lowercase : Tuple =[]
for n in solution[1:-1]:
lowercase : Dict =solution.index(__magic_name__ )
for kn in solution[1:-1]:
lowercase : Tuple =solution.index(__magic_name__ )
if n == kn:
continue
lowercase : Union[str, Any] =copy.deepcopy(__magic_name__ )
lowercase : Optional[int] =kn
lowercase : List[Any] =n
lowercase : List[Any] =0
for k in _tmp[:-1]:
lowercase : Optional[int] =_tmp[_tmp.index(__magic_name__ ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
lowercase : Optional[int] =distance + int(i[1] )
_tmp.append(__magic_name__ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
lowercase : Union[str, Any] =len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda x : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : Dict ) -> Union[str, Any]:
lowercase : str =1
lowercase : List[Any] =first_solution
lowercase : Any =[]
lowercase : str =distance_of_first_solution
lowercase : str =solution
while count <= iters:
lowercase : Union[str, Any] =find_neighborhood(__magic_name__ , __magic_name__ )
lowercase : Dict =0
lowercase : int =neighborhood[index_of_best_solution]
lowercase : Optional[int] =len(__magic_name__ ) - 1
lowercase : List[Any] =False
while not found:
lowercase : List[Any] =0
while i < len(__magic_name__ ):
if best_solution[i] != solution[i]:
lowercase : List[str] =best_solution[i]
lowercase : Dict =solution[i]
break
lowercase : Any =i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
lowercase : str =True
lowercase : int =best_solution[:-1]
lowercase : Any =neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
lowercase : Optional[int] =cost
lowercase : str =solution
else:
lowercase : Optional[int] =index_of_best_solution + 1
lowercase : List[Any] =neighborhood[index_of_best_solution]
if len(__magic_name__ ) >= size:
tabu_list.pop(0 )
lowercase : Optional[int] =count + 1
return best_solution_ever, best_cost
def _lowerCAmelCase ( __magic_name__ : str=None ) -> Tuple:
lowercase : List[str] =generate_neighbours(args.File )
lowercase , lowercase : Optional[Any] =generate_first_solution(
args.File , __magic_name__ )
lowercase , lowercase : int =tabu_search(
__magic_name__ , __magic_name__ , __magic_name__ , args.Iterations , args.Size , )
print(f'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser(description="""Tabu Search""")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
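# Hypothetical input format and invocation for the script above. Each line of
# the data file lists one weighted edge as "node node distance":
#   a b 20
#   a c 18
#   b c 10
# Invocation sketch:
#   python tabu_search.py -f edges.txt -i 100 -s 5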
| 88 | 1 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class __SCREAMING_SNAKE_CASE :
def __init__( self : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : List[Any]=False , UpperCAmelCase__ : Any=10 , UpperCAmelCase__ : List[Any]=3 , UpperCAmelCase__ : int=32 * 8 , UpperCAmelCase__ : Union[str, Any]=32 * 8 , UpperCAmelCase__ : int=4 , UpperCAmelCase__ : Any=64 , ):
'''simple docstring'''
lowercase : List[Any] =parent
lowercase : Optional[int] =batch_size
lowercase : Optional[int] =is_training
lowercase : str =use_auxiliary_loss
lowercase : int =num_queries
lowercase : Dict =num_channels
lowercase : str =min_size
lowercase : Optional[int] =max_size
lowercase : Any =num_labels
lowercase : Any =hidden_dim
lowercase : List[Any] =hidden_dim
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : str =floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
UpperCAmelCase__ )
lowercase : str =torch.ones([self.batch_size, self.min_size, self.max_size] , device=UpperCAmelCase__ )
lowercase : List[str] =(
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=UpperCAmelCase__ ) > 0.5
).float()
lowercase : Any =(torch.rand((self.batch_size, self.num_labels) , device=UpperCAmelCase__ ) > 0.5).long()
lowercase : Tuple =self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : str =MaskaFormerConfig(
hidden_size=self.hidden_dim , )
lowercase : List[str] =self.num_queries
lowercase : Tuple =self.num_labels
lowercase : Tuple =[1, 1, 1, 1]
lowercase : Tuple =self.num_channels
lowercase : Any =64
lowercase : List[Any] =128
lowercase : Union[str, Any] =self.hidden_dim
lowercase : str =self.hidden_dim
lowercase : Dict =self.hidden_dim
return config
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase , lowercase , lowercase , lowercase , lowercase : int =self.prepare_config_and_inputs()
lowercase : List[str] ={'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
lowercase : str =output.encoder_hidden_states
lowercase : Optional[Any] =output.pixel_decoder_hidden_states
lowercase : List[Any] =output.transformer_decoder_hidden_states
# assertTrue silently treats its second argument as a failure message; use assertEqual to actually compare lengths
self.parent.assertEqual(len(UpperCAmelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertEqual(len(UpperCAmelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertEqual(len(UpperCAmelCase__ ) , config.decoder_layers )
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int=False ):
'''simple docstring'''
with torch.no_grad():
lowercase : int =MaskaFormerModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : List[Any] =model(pixel_values=UpperCAmelCase__ , pixel_mask=UpperCAmelCase__ )
lowercase : Dict =model(UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] ):
'''simple docstring'''
lowercase : int =MaskaFormerForUniversalSegmentation(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
def comm_check_on_output(UpperCAmelCase__ : Union[str, Any] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowercase : Optional[Any] =model(pixel_values=UpperCAmelCase__ , pixel_mask=UpperCAmelCase__ )
lowercase : List[Any] =model(UpperCAmelCase__ )
comm_check_on_output(UpperCAmelCase__ )
lowercase : str =model(
pixel_values=UpperCAmelCase__ , pixel_mask=UpperCAmelCase__ , mask_labels=UpperCAmelCase__ , class_labels=UpperCAmelCase__ )
comm_check_on_output(UpperCAmelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
lowerCamelCase_ = {'feature-extraction': MaskaFormerModel} if is_torch_available() else {}
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : List[str] =MaskaFormerModelTester(self )
lowercase : Optional[Any] =ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase , lowercase : Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(UpperCAmelCase__ , **UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*UpperCAmelCase__ )
@unittest.skip(reason='''Mask2Former does not use inputs_embeds''' )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip(reason='''Mask2Former does not have a get_input_embeddings method''' )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='''Mask2Former is not a generative model''' )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason='''Mask2Former does not use token embeddings''' )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase , lowercase : Any =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Tuple =model_class(UpperCAmelCase__ )
lowercase : int =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase : List[str] =[*signature.parameters.keys()]
lowercase : Tuple =['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
@slow
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
lowercase : Dict =MaskaFormerModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : int =(self.model_tester.min_size,) * 2
lowercase : int ={
'''pixel_values''': torch.randn((2, 3, *size) , device=UpperCAmelCase__ ),
'''mask_labels''': torch.randn((2, 10, *size) , device=UpperCAmelCase__ ),
'''class_labels''': torch.zeros(2 , 10 , device=UpperCAmelCase__ ).long(),
}
lowercase : Tuple =self.model_tester.get_config()
lowercase : List[Any] =MaskaFormerForUniversalSegmentation(UpperCAmelCase__ ).to(UpperCAmelCase__ )
lowercase : List[str] =model(**UpperCAmelCase__ )
self.assertTrue(outputs.loss is not None )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase , lowercase : Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(UpperCAmelCase__ , **UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase , lowercase : int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Dict =model_class(UpperCAmelCase__ ).to(UpperCAmelCase__ )
lowercase : Any =model(**UpperCAmelCase__ , output_attentions=UpperCAmelCase__ )
self.assertTrue(outputs.attentions is not None )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
if not self.model_tester.is_training:
return
lowercase : Optional[Any] =self.all_model_classes[1]
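# Only the universal-segmentation head (all_model_classes[1]) computes a loss to backpropagate.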
lowercase , lowercase , lowercase , lowercase , lowercase : Dict =self.model_tester.prepare_config_and_inputs()
lowercase : str =model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.train()
lowercase : Union[str, Any] =model(UpperCAmelCase__ , mask_labels=UpperCAmelCase__ , class_labels=UpperCAmelCase__ ).loss
loss.backward()
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Union[str, Any] =self.all_model_classes[1]
lowercase , lowercase , lowercase , lowercase , lowercase : Dict =self.model_tester.prepare_config_and_inputs()
lowercase : str =True
lowercase : Tuple =True
lowercase : Union[str, Any] =model_class(UpperCAmelCase__ ).to(UpperCAmelCase__ )
model.train()
lowercase : int =model(UpperCAmelCase__ , mask_labels=UpperCAmelCase__ , class_labels=UpperCAmelCase__ )
lowercase : Optional[Any] =outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowercase : int =outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
lowercase : Optional[Any] =outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowercase : Dict =outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=UpperCAmelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
UpperCamelCase_ = 1E-4
def _lowerCAmelCase ( ) -> List[str]:
lowercase : List[Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : int =MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(UpperCAmelCase__ )
lowercase : Dict =self.default_image_processor
lowercase : Optional[int] =prepare_img()
lowercase : Any =image_processor(UpperCAmelCase__ , return_tensors='''pt''' ).to(UpperCAmelCase__ )
lowercase : Optional[Any] =inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCAmelCase__ , (1, 3, 384, 384) )
with torch.no_grad():
lowercase : Union[str, Any] =model(**UpperCAmelCase__ )
lowercase : List[Any] =torch.tensor(
[[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(UpperCAmelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__ ) )
lowercase : Optional[int] =torch.tensor(
[[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(UpperCAmelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__ ) )
lowercase : int =torch.tensor(
[[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(UpperCAmelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__ ) )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Optional[Any] =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(UpperCAmelCase__ ).eval()
lowercase : str =self.default_image_processor
lowercase : str =prepare_img()
lowercase : Tuple =image_processor(UpperCAmelCase__ , return_tensors='''pt''' ).to(UpperCAmelCase__ )
lowercase : List[str] =inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCAmelCase__ , (1, 3, 384, 384) )
with torch.no_grad():
lowercase : Optional[Any] =model(**UpperCAmelCase__ )
# masks_queries_logits
lowercase : List[Any] =outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
lowercase : List[Any] =[
[-8.78_39, -9.00_56, -8.81_21],
[-7.41_04, -7.03_13, -6.54_01],
[-6.61_05, -6.34_27, -6.46_75],
]
lowercase : Union[str, Any] =torch.tensor(UpperCAmelCase__ ).to(UpperCAmelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__ ) )
# class_queries_logits
lowercase : Dict =outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
lowercase : Optional[Any] =torch.tensor(
[
[1.83_24, -8.08_35, -4.19_22],
[0.84_50, -9.00_50, -3.60_53],
[0.30_45, -7.72_93, -3.02_75],
] ).to(UpperCAmelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase__ , atol=UpperCAmelCase__ ) )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : List[str] =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(UpperCAmelCase__ ).eval()
lowercase : int =self.default_image_processor
lowercase : Tuple =image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='''pt''' , )
lowercase : Tuple =inputs['''pixel_values'''].to(UpperCAmelCase__ )
lowercase : Dict =[el.to(UpperCAmelCase__ ) for el in inputs['''mask_labels''']]
lowercase : Union[str, Any] =[el.to(UpperCAmelCase__ ) for el in inputs['''class_labels''']]
with torch.no_grad():
lowercase : List[str] =model(**UpperCAmelCase__ )
self.assertTrue(outputs.loss is not None )
| 88 |
'''simple docstring'''
def _lowerCAmelCase ( __magic_name__ : int = 1000000 ) -> int:
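# Sum Euler's totient phi(n) for 2 <= n <= limit (this counts the reduced proper
# fractions with denominator <= limit, as in Project Euler problem 72): sieve the
# primes first, then apply the product formula phi(n) = n * prod_{p | n} (1 - 1/p).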
lowercase : Dict =set(range(3 , __magic_name__ , 2 ) )
primes.add(2 )
for p in range(3 , __magic_name__ , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , __magic_name__ , p ) ) )  # strike out multiples of p (the step is p)
lowercase : List[Any] =[float(n) for n in range(limit + 1 )]
for p in primes:
for n in range(p , limit + 1 , p ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 88 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
def __init__( self : int , UpperCAmelCase__ : Optional[NestedDataStructureLike[PathLike]] = None , UpperCAmelCase__ : Optional[NamedSplit] = None , UpperCAmelCase__ : Optional[Features] = None , UpperCAmelCase__ : str = None , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional[int] = None , **UpperCAmelCase__ : Dict , ):
'''simple docstring'''
lowercase : List[str] =path_or_paths
lowercase : List[str] =split if split or isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else '''train'''
lowercase : str =features
lowercase : Union[str, Any] =cache_dir
lowercase : Union[str, Any] =keep_in_memory
lowercase : List[str] =streaming
lowercase : Optional[Any] =num_proc
lowercase : str =kwargs
@abstractmethod
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
pass
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
def __init__( self : int , UpperCAmelCase__ : Optional[Features] = None , UpperCAmelCase__ : str = None , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional[int] = None , **UpperCAmelCase__ : Dict , ):
'''simple docstring'''
lowercase : List[Any] =features
lowercase : List[Any] =cache_dir
lowercase : List[Any] =keep_in_memory
lowercase : Tuple =streaming
lowercase : Tuple =num_proc
lowercase : int =kwargs
@abstractmethod
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
pass
| 88 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = BioGptTokenizer
lowerCamelCase_ = False
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase : List[str] =[
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
lowercase : Any =dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
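# Toy merge rules (token pair plus a made-up frequency); the trailing empty string terminates the file.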
lowercase : Union[str, Any] =['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
lowercase : Optional[int] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase : Union[str, Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(UpperCAmelCase__ ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(UpperCAmelCase__ ) )
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase : Dict ='''lower newer'''
lowercase : str ='''lower newer'''
return input_text, output_text
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : List[Any] =BioGptTokenizer(self.vocab_file , self.merges_file )
lowercase : Any ='''lower'''
lowercase : int =['''low''', '''er</w>''']
lowercase : Optional[Any] =tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Optional[int] =tokens + ['''<unk>''']
lowercase : Any =[14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ )
@slow
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Dict =BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
lowercase : List[str] =tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCAmelCase__ )
lowercase : Optional[int] =tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCAmelCase__ )
lowercase : str =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ )
lowercase : Optional[Any] =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 88 | 1 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def _lowerCAmelCase ( __magic_name__ : str = "" , ) -> bool:
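# A string can be rearranged into a palindrome iff at most one character occurs an odd number of times.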
return sum(c % 2 for c in Counter(input_str.replace(''' ''' , '''''' ).lower() ).values() ) < 2
def _lowerCAmelCase ( __magic_name__ : str = "" ) -> bool:
if len(__magic_name__ ) == 0:
return True
lowercase : Union[str, Any] =input_str.replace(''' ''' , '''''' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
lowercase : dict[str, int] ={}
for character in lower_case_input_str:
lowercase : Dict =character_freq_dict.get(__magic_name__ , 0 ) + 1
lowercase : str =0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def _lowerCAmelCase ( __magic_name__ : str = "" ) -> None:
print('''\nFor string = ''' , __magic_name__ , ''':''' )
print(
'''> can_string_be_rearranged_as_palindrome_counter()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome_counter(__magic_name__ ) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome_counter(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
print(
'''> can_string_be_rearranged_as_palindrome()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome(__magic_name__ ) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
if __name__ == "__main__":
UpperCamelCase_ = input(
"""Enter string to determine if it can be rearranged as a palindrome or not: """
).strip()
benchmark(check_str)
UpperCamelCase_ = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f'''{check_str} can {'' if status else 'not '}be rearranged as a palindrome''')
| 88 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str=13 , UpperCAmelCase__ : Optional[Any]=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Dict=99 , UpperCAmelCase__ : str=32 , UpperCAmelCase__ : Optional[Any]=5 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : str="gelu" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Optional[Any]=512 , UpperCAmelCase__ : Any=16 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : int=4 , ):
'''simple docstring'''
lowercase : int =parent
lowercase : List[str] =batch_size
lowercase : str =seq_length
lowercase : Optional[Any] =is_training
lowercase : Union[str, Any] =use_attention_mask
lowercase : Optional[Any] =use_token_type_ids
lowercase : Tuple =use_labels
lowercase : List[str] =vocab_size
lowercase : List[str] =hidden_size
lowercase : Tuple =num_hidden_layers
lowercase : Any =num_attention_heads
lowercase : List[str] =intermediate_size
lowercase : Optional[Any] =hidden_act
lowercase : Dict =hidden_dropout_prob
lowercase : List[Any] =attention_probs_dropout_prob
lowercase : Optional[Any] =max_position_embeddings
lowercase : Tuple =type_vocab_size
lowercase : Optional[int] =type_sequence_label_size
lowercase : Optional[Any] =initializer_range
lowercase : Optional[int] =num_choices
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Union[str, Any] =None
if self.use_attention_mask:
lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Tuple =None
if self.use_token_type_ids:
lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : int =RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : List[Any] =self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase : str =config_and_inputs
lowercase : Optional[Any] ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : List[str] =self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase : Any =config_and_inputs
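# Decoder variant: mark the config as a decoder and fabricate encoder outputs for cross-attention.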
lowercase : List[str] =True
lowercase : Optional[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase : str =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = True
lowerCamelCase_ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : str =FlaxRobertaModelTester(self )
@slow
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowercase : Optional[int] =model_class_name.from_pretrained('''roberta-base''' , from_pt=UpperCAmelCase__ )
lowercase : List[Any] =model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase__ )
| 88 | 1 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
UpperCamelCase_ = """\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100,000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""
UpperCamelCase_ = """
This metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
UpperCamelCase_ = """
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the SQuAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
>>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
>>> squad_metric = datasets.load_metric(\"squad\")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {'''id''': datasets.Value('''string''' ), '''prediction_text''': datasets.Value('''string''' )},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , reference_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , )
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int ):
'''simple docstring'''
lowercase : Dict ={prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
lowercase : str =[
{
'''paragraphs''': [
{
'''qas''': [
{
'''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
'''id''': ref['''id'''],
}
for ref in references
]
}
]
}
]
lowercase : Dict =evaluate(dataset=UpperCAmelCase__ , predictions=UpperCAmelCase__ )
return score
| 88 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Optional[Any]=None ):
'''simple docstring'''
# Input as list
lowercase : Optional[int] =list(poly_a or [0] )[:]
lowercase : Optional[Any] =list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
lowercase : Any =len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
lowercase : Dict =len(self.polyB )
# Add 0 to make lengths equal a power of 2
lowercase : int =int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
lowercase : Union[str, Any] =complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
lowercase : Tuple =self.__multiply()
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Tuple ):
'''simple docstring'''
lowercase : Union[str, Any] =[[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB]
# Corner case
if len(UpperCAmelCase__ ) <= 1:
return dft[0]
# Radix-2 butterfly passes: the number of active columns halves on every iteration
lowercase : Any =self.c_max_length // 2
while next_ncol > 0:
lowercase : Optional[int] =[[] for i in range(UpperCAmelCase__ )]
lowercase : Tuple =self.root**next_ncol
# First half of next step
lowercase : str =1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCAmelCase__ ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
lowercase : int =1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCAmelCase__ ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
lowercase : Dict =new_dft
lowercase : Tuple =next_ncol // 2
return dft[0]
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Any =self.__dft('''A''' )
lowercase : Any =self.__dft('''B''' )
lowercase : Optional[int] =[[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
lowercase : Optional[int] =2
while next_ncol <= self.c_max_length:
lowercase : Optional[int] =[[] for i in range(UpperCAmelCase__ )]
lowercase : List[str] =self.root ** (next_ncol // 2)
lowercase : Optional[int] =1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
lowercase : List[Any] =new_inverse_c
next_ncol *= 2
# Unpack
lowercase : Tuple =[round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Any ):
'''simple docstring'''
# enumerate yields (index, value): the exponent i comes first, then the coefficient
lowercase : Any ='''A = ''' + ''' + '''.join(
F'''{coef}*x^{i}''' for i, coef in enumerate(self.polyA[: self.len_A] ) )
lowercase : Tuple ='''B = ''' + ''' + '''.join(
F'''{coef}*x^{i}''' for i, coef in enumerate(self.polyB[: self.len_B] ) )
lowercase : List[str] ='''A*B = ''' + ''' + '''.join(
F'''{coef}*x^{i}''' for i, coef in enumerate(self.product ) )
return F'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
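# A minimal usage sketch (assuming the class above takes the two coefficient
# lists positionally, lowest-degree coefficient first):
# (1 + 2x + 3x^2) * (4 + 5x) = 4 + 13x + 22x^2 + 15x^3, up to float/complex rounding.
print(__SCREAMING_SNAKE_CASE([1, 2, 3], [4, 5]))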
| 88 | 1 |
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def _lowerCAmelCase ( __magic_name__ : Any ) -> str:
lowercase : Optional[Any] =[
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(__magic_name__ , __magic_name__ )
def _lowerCAmelCase ( __magic_name__ : List[str] ) -> str:
lowercase , lowercase : int =emb.weight.shape
lowercase : str =nn.Linear(__magic_name__ , __magic_name__ , bias=__magic_name__ )
lowercase : List[Any] =emb.weight.data
return lin_layer
def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : Dict=None ) -> int:
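# Remap fairseq MoE checkpoint keys onto the Hugging Face NllbMoe layout (experts, router, FFN, cross-attention).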
lowercase : Dict ={}
for old_key in state_dict.keys():
lowercase : List[Any] =old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
lowercase : Tuple =key.replace('''moe_layer.experts.0''' , f'''ffn.experts.expert_{expert_idx}''' )
else:
lowercase : List[str] =key.replace('''moe_layer.experts.''' , '''ffn.experts.expert_''' )
if "gate" in key:
lowercase : Tuple =key.replace('''.moe_layer.gate.wg''' , '''.ffn.router.classifier''' )
if "fc2" and "experts" not in key:
lowercase : int =key.replace('''.fc2.''' , '''.ffn.fc2.''' )
if "fc1" and "experts" not in key:
lowercase : Union[str, Any] =key.replace('''.fc1.''' , '''.ffn.fc1.''' )
if ".encoder_attn." in key:
lowercase : Any =key.replace('''.encoder_attn.''' , '''.cross_attention.''' )
if "encoder_attn_layer_norm" in key:
lowercase : Optional[int] =key.replace('''encoder_attn_layer_norm''' , '''cross_attention_layer_norm''' )
if "final_layer_norm" in key:
lowercase : Optional[Any] =key.replace('''final_layer_norm''' , '''ff_layer_norm''' )
lowercase : Optional[int] =state_dict[old_key]
return new_dict
def _lowerCAmelCase ( __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : str = WEIGHTS_NAME ) -> str:
lowercase : Tuple =[]
lowercase : Union[str, Any] =0
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
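# fairseq saves each expert in its own checkpoint, suffixed '-rank-<expert>.pt'; shard them one at a time.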
for expert in range(__magic_name__ ):
lowercase : List[Any] =switch_checkpoint_path + f'''-rank-{expert}.pt'''
if os.path.isfile(__magic_name__ ):
lowercase : str =torch.load(__magic_name__ )['''model''']
remove_ignore_keys_(__magic_name__ )
lowercase : Union[str, Any] =rename_fairseq_keys(__magic_name__ , __magic_name__ )
lowercase : Any =os.path.join(
__magic_name__ , weights_name.replace('''.bin''' , f'''-{len(__magic_name__ )+1:05d}-of-???.bin''' ) )
torch.save(__magic_name__ , __magic_name__ )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(__magic_name__ )[0]].dtype )
# Add the last block
lowercase : Any =os.path.join(__magic_name__ , weights_name.replace('''.bin''' , f'''-{len(__magic_name__ )+1:05d}-of-???.bin''' ) )
lowercase : Tuple =torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model''']
remove_ignore_keys_(__magic_name__ )
lowercase : Union[str, Any] =rename_fairseq_keys(__magic_name__ , __magic_name__ )
lowercase : Any =shared_weights['''decoder.embed_tokens.weight''']
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(__magic_name__ ) == 1:
lowercase : int =os.path.join(__magic_name__ , __magic_name__ )
torch.save(__magic_name__ , __magic_name__ )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(__magic_name__ , __magic_name__ )
# Otherwise, let's build the index
lowercase : Optional[Any] ={}
for idx, shard in enumerate(__magic_name__ ):
lowercase : str =weights_name.replace('''.bin''' , f'''-{idx+1:05d}-of-{len(__magic_name__ ):05d}.bin''' )
lowercase : List[Any] =os.path.join(__magic_name__ , weights_name.replace('''.bin''' , f'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(__magic_name__ , os.path.join(__magic_name__ , __magic_name__ ) )
for key in shard:
lowercase : Optional[int] =shard_file
# Add the metadata
lowercase : Optional[int] ={'''total_size''': total_size}
lowercase : str ={'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(__magic_name__ , __magic_name__ ) , '''w''' , encoding='''utf-8''' ) as f:
lowercase : Union[str, Any] =json.dumps(__magic_name__ , indent=2 , sort_keys=__magic_name__ ) + '''\n'''
f.write(__magic_name__ )
return metadata, index
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--nllb_moe_checkpoint_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
UpperCamelCase_ = parser.parse_args()
UpperCamelCase_ , UpperCamelCase_ = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
UpperCamelCase_ = NllbMoeConfig.from_pretrained(
"""facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
UpperCamelCase_ = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("""Done""")
model.save_pretrained(args.pytorch_dump_folder_path)
| 88 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {"""configuration_plbart""": ["""PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PLBartConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""PLBartTokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""PLBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PLBartForCausalLM""",
"""PLBartForConditionalGeneration""",
"""PLBartForSequenceClassification""",
"""PLBartModel""",
"""PLBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 88 | 1 |
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : str , __magic_name__ : Tuple ) -> Optional[int]:
# Initialise PyTorch model
lowercase : List[str] =BigBirdConfig.from_json_file(__magic_name__ )
print(f'''Building PyTorch model from configuration: {config}''' )
if is_trivia_qa:
lowercase : str =BigBirdForQuestionAnswering(__magic_name__ )
else:
lowercase : Optional[int] =BigBirdForPreTraining(__magic_name__ )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(__magic_name__ , __magic_name__ , is_trivia_qa=__magic_name__ )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(__magic_name__ )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
UpperCamelCase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 88 |
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCamelCase_ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = 'vision-encoder-decoder'
lowerCamelCase_ = True
def __init__( self : Optional[int] , **UpperCAmelCase__ : Tuple ):
'''simple docstring'''
super().__init__(**UpperCAmelCase__ )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
F'''A configuration of type {self.model_type} cannot be instantiated because '''
F'''both `encoder` and `decoder` sub-configurations must be passed, but only {kwargs} was given''' )
lowercase : Optional[Any] =kwargs.pop('''encoder''' )
lowercase : List[Any] =encoder_config.pop('''model_type''' )
lowercase : List[str] =kwargs.pop('''decoder''' )
lowercase : Dict =decoder_config.pop('''model_type''' )
lowercase : Union[str, Any] =AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase : List[str] =AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase : str =True
@classmethod
def lowerCamelCase_ ( cls : List[str] , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : PretrainedConfig , **UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
logger.info('''Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
lowercase : int =True
lowercase : Optional[Any] =True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : int =copy.deepcopy(self.__dict__ )
lowercase : Union[str, Any] =self.encoder.to_dict()
lowercase : Union[str, Any] =self.decoder.to_dict()
lowercase : int =self.__class__.model_type
return output
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = version.parse('1.11' )
@property
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return 1E-4
@property
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
return OrderedDict({'''last_hidden_state''': {0: '''batch''', 1: '''encoder_sequence'''}} )
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
@property
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : List[str] =OrderedDict()
lowercase : Tuple ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
lowercase : Optional[int] ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
lowercase : int ={0: '''batch''', 1: '''encoder_sequence'''}
return common_inputs
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : "PreTrainedTokenizerBase" , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional["TensorType"] = None , ):
'''simple docstring'''
import torch
lowercase : Optional[Any] =OrderedDict()
lowercase : List[Any] =super().generate_dummy_inputs(
UpperCAmelCase__ , batch_size=UpperCAmelCase__ , seq_length=UpperCAmelCase__ , is_pair=UpperCAmelCase__ , framework=UpperCAmelCase__ )
lowercase , lowercase : Optional[int] =dummy_input['''input_ids'''].shape
lowercase : Union[str, Any] =(batch, encoder_sequence, self._config.encoder_hidden_size)
lowercase : List[str] =dummy_input.pop('''input_ids''' )
lowercase : Tuple =dummy_input.pop('''attention_mask''' )
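# The decoder export has no live encoder, so feed zeros with the expected encoder output shape.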
lowercase : Union[str, Any] =torch.zeros(UpperCAmelCase__ )
return common_inputs
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
@property
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : PretrainedConfig ):
'''simple docstring'''
return VisionEncoderDecoderEncoderOnnxConfig(UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : str = "default" ):
'''simple docstring'''
lowercase : List[Any] =encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(UpperCAmelCase__ , UpperCAmelCase__ )
| 88 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( point_a : list , point_b : list ) -> float:
# Squared Euclidean distance between two 2-D points (no sqrt; cheaper when only comparing).
return (point_a[0] - point_b[0]) ** 2 + (point_a[1] - point_b[1]) ** 2
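# All helpers below compare squared distances; the square root is taken once at the very end.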
def _lowerCAmelCase ( __magic_name__ : Union[str, Any] , __magic_name__ : Any=0 ) -> Optional[int]:
return sorted(__magic_name__ , key=lambda __magic_name__ : x[column] )
def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : Dict=float('''inf''' ) ) -> str:
for i in range(points_counts - 1 ):
for j in range(i + 1 , __magic_name__ ):
lowercase : Optional[int] =euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
lowercase : str =current_dis
return min_dis
def _lowerCAmelCase ( __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any=float('''inf''' ) ) -> Union[str, Any]:
for i in range(min(6 , points_counts - 1 ) , __magic_name__ ):
for j in range(max(0 , i - 6 ) , __magic_name__ ):
lowercase : int =euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
lowercase : Dict =current_dis
return min_dis
def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : List[Any] , __magic_name__ : Dict ) -> Tuple:
# base case
if points_counts <= 3:
return dis_between_closest_pair(__magic_name__ , __magic_name__ )
# recursion
lowercase : Any =points_counts // 2
lowercase : int =closest_pair_of_points_sqr(
__magic_name__ , points_sorted_on_y[:mid] , __magic_name__ )
lowercase : Optional[int] =closest_pair_of_points_sqr(
__magic_name__ , points_sorted_on_y[mid:] , points_counts - mid )
lowercase : Optional[int] =min(__magic_name__ , __magic_name__ )
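# Merge step: collect points within the best distance of the dividing line and check pairs that straddle it.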
lowercase : Union[str, Any] =[]
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(__magic_name__ )
lowercase : Any =dis_between_closest_in_strip(
__magic_name__ , len(__magic_name__ ) , __magic_name__ )
return min(__magic_name__ , __magic_name__ )
def _lowerCAmelCase ( __magic_name__ : Tuple , __magic_name__ : Optional[int] ) -> List[str]:
lowercase : Union[str, Any] =column_based_sort(__magic_name__ , column=0 )
lowercase : int =column_based_sort(__magic_name__ , column=1 )
return (
closest_pair_of_points_sqr(
__magic_name__ , __magic_name__ , __magic_name__ )
) ** 0.5
if __name__ == "__main__":
UpperCamelCase_ = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print("""Distance:""", closest_pair_of_points(points, len(points)))
| 88 |
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
UpperCamelCase_ = logging.getLogger(__name__)
UpperCamelCase_ = tf.data.AUTOTUNE
def _lowerCAmelCase ( ) -> Any:
lowercase : Dict =argparse.ArgumentParser(description='''Train a masked language model on TPU.''' )
parser.add_argument(
'''--pretrained_model_config''' , type=__magic_name__ , default='''roberta-base''' , help='''The model config to use. Note that we don\'t copy the model\'s weights, only the config!''' , )
parser.add_argument(
'''--tokenizer''' , type=__magic_name__ , default='''unigram-tokenizer-wikitext''' , help='''The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.''' , )
parser.add_argument(
'''--per_replica_batch_size''' , type=__magic_name__ , default=8 , help='''Batch size per TPU core.''' , )
parser.add_argument(
'''--no_tpu''' , action='''store_true''' , help='''If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.''' , )
parser.add_argument(
'''--tpu_name''' , type=__magic_name__ , help='''Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.''' , default='''local''' , )
parser.add_argument(
'''--tpu_zone''' , type=__magic_name__ , help='''Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.''' , )
parser.add_argument(
'''--gcp_project''' , type=__magic_name__ , help='''Google cloud project name. Only used for non-Colab TPU nodes.''' )
parser.add_argument(
'''--bfloat16''' , action='''store_true''' , help='''Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.''' , )
parser.add_argument(
'''--train_dataset''' , type=__magic_name__ , help='''Path to training dataset to load. If the path begins with `gs://`'''
''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , )
parser.add_argument(
'''--shuffle_buffer_size''' , type=__magic_name__ , default=2**18 , help='''Size of the shuffle buffer (in samples)''' , )
parser.add_argument(
'''--eval_dataset''' , type=__magic_name__ , help='''Path to evaluation dataset to load. If the path begins with `gs://`'''
''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , )
parser.add_argument(
'''--num_epochs''' , type=__magic_name__ , default=1 , help='''Number of epochs to train for.''' , )
parser.add_argument(
'''--learning_rate''' , type=__magic_name__ , default=1E-4 , help='''Learning rate to use for training.''' , )
parser.add_argument(
'''--weight_decay_rate''' , type=__magic_name__ , default=1E-3 , help='''Weight decay rate to use for training.''' , )
parser.add_argument(
'''--max_length''' , type=__magic_name__ , default=512 , help='''Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py''' , )
parser.add_argument(
'''--mlm_probability''' , type=__magic_name__ , default=0.1_5 , help='''Fraction of tokens to mask during training.''' , )
parser.add_argument('''--output_dir''' , type=__magic_name__ , required=__magic_name__ , help='''Path to save model checkpoints to.''' )
parser.add_argument('''--hub_model_id''' , type=__magic_name__ , help='''Model ID to upload to on the Hugging Face Hub.''' )
lowercase : Union[str, Any] =parser.parse_args()
return args
def _lowerCAmelCase ( __magic_name__ : List[str] ) -> List[Any]:
try:
if args.tpu_name:
lowercase : Dict =tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
lowercase : Optional[int] =tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
'''Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or '''
'''--gcp_project. When running on a TPU VM, use --tpu_name local.''' )
tf.config.experimental_connect_to_cluster(__magic_name__ )
tf.tpu.experimental.initialize_tpu_system(__magic_name__ )
return tpu
def _lowerCAmelCase ( __magic_name__ : Tuple ) -> Union[str, Any]:
lowercase : str =0
for file in file_list:
lowercase : List[str] =file.split('''/''' )[-1]
lowercase : Union[str, Any] =re.search(R'''-\d+-(\d+)\.tfrecord''' , __magic_name__ ).group(1 )
lowercase : int =int(__magic_name__ )
num_samples += sample_count
return num_samples
def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int]=None ) -> str:
lowercase : int =count_samples(__magic_name__ )
lowercase : Union[str, Any] =tf.data.Dataset.from_tensor_slices(__magic_name__ )
if shuffle:
lowercase : Union[str, Any] =dataset.shuffle(len(__magic_name__ ) )
lowercase : Any =tf.data.TFRecordDataset(__magic_name__ , num_parallel_reads=__magic_name__ )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
lowercase : Optional[int] =dataset.apply(tf.data.experimental.assert_cardinality(__magic_name__ ) )
lowercase : str =dataset.map(__magic_name__ , num_parallel_calls=__magic_name__ )
if shuffle:
assert shuffle_buffer_size is not None
lowercase : int =dataset.shuffle(args.shuffle_buffer_size )
lowercase : Optional[int] =dataset.batch(__magic_name__ , drop_remainder=__magic_name__ )
lowercase : int =dataset.map(__magic_name__ , num_parallel_calls=__magic_name__ )
lowercase : Union[str, Any] =dataset.prefetch(__magic_name__ )
return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    args = parse_args()
    main(args)
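# Hypothetical invocation (flag names inferred from the `args.*` attributes used
# above; the authoritative list lives in the parse_args() defined earlier in the
# script):
#   python run_mlm.py --tpu_name local --tokenizer <tokenizer> \
#       --pretrained_model_config roberta-base --train_dataset gs://bucket/train \
#       --eval_dataset gs://bucket/eval --output_dir ./mlm_model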
| 88 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)
    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)
    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
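# Minimal usage sketch of the processor exercised above (the audio array is a
# synthetic placeholder, not real data):
#   import numpy as np
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["a dog barking"], audios=[np.zeros(48_000)], return_tensors="np")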
| 88 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
# Silence TensorFlow's verbose C++ logging before any framework import
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 88 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Solve for whichever quantity among voltage, current and power is passed as 0 (P = V * I)."""
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError("Power cannot be negative in any electrical/electronics system")
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
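# Worked examples (P = V * I):
#   electric_power(voltage=0, current=2, power=4)      # -> result(name='voltage', value=2.0)
#   electric_power(voltage=2.2, current=2.2, power=0)  # -> result(name='power', value=4.84)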
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1
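    # Resulting layout (XGLM follows fairseq in reusing </s> as the separator, with
    # no dedicated BOS): single sequence -> "</s> A"; pair -> "</s> A </s> </s> B"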
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
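# Minimal usage sketch (checkpoint name taken from PRETRAINED_VOCAB_FILES_MAP above):
#   tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
#   ids = tokenizer("Hello world").input_ids  # begins with the </s> separator id (2)
#   tokenizer.decode(ids)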
| 88 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
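# With the lazy module installed, importing the package stays cheap: heavy torch/TF
# symbols listed in _import_structure are only materialized on first attribute
# access. A sketch of the standard transformers _LazyModule pattern, assuming the
# usual package layout:
#   from transformers.models import groupvit
#   groupvit.GroupViTConfig  # triggers the real import of configuration_groupvit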
| 88 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue  # skip Adam optimizer slots, only the weights are converted
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description="""model converter.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("""--tf_model_dir""", metavar="""PATH""", type=str, required=True, help="""import model""")
parser.add_argument("""--output""", metavar="""PATH""", type=str, required=True, help="""output model""")
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
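# Example invocation (the script filename and both paths are placeholders):
#   python convert_tf_gptsan_to_pt.py --tf_model_dir ./gptsan_tf_checkpoint --output ./gptsan_pytorch_model.pt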
| 88 | 1 |
'''simple docstring'''
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = """\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
"""
_DESCRIPTION = """\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""
_KWARGS_DESCRIPTION = """
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"precision\": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'precision@10': 1.0}
"""
def simple_accuracy(preds, labels) -> float:
    return float((preds == labels).mean())
def acc_and_f1(preds, labels) -> dict:
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def precision_at_10(en_sentvecs, in_sentvecs) -> float:
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
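# How precision@10 reads: row i of `sim` holds cosine distances from English
# sentence vector i to every candidate in the other language, and the score is the
# fraction of rows whose true index i lands among the 10 nearest candidates. With
# identical parallel vectors (as in the module docstring's example) every row
# matches itself first:
#   precision_at_10([[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]],
#                   [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]])  # -> 1.0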
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
    def _info(self):
        """Declare the prediction/reference features expected for the chosen IndicGLUE configuration."""
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
'''references''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , )
    def _compute(self, predictions, references):
        """Dispatch to the right scoring function for the configured task."""
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
            return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
| 88 |
'''simple docstring'''
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1_004)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    @cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_special_tokens(self):
        """Decoding a sentence containing [MASK] should keep the mask glued to the preceding word."""
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)
        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")
@slow
    def test_tokenizer_integration(self):
# fmt: off
lowercase : str ={'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase, model_name="google/bigbird-roberta-base", revision="215c99f1600e06f83acce68422f2035b2b5c3510"
        )
| 88 | 1 |
'''simple docstring'''
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""sijunhe/nezha-cn-base""": """https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json""",
}
class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
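# Minimal usage sketch (every argument has the default shown above):
#   config = NezhaConfig()
#   config.max_relative_position  # -> 64, the window used by NEZHA's relative-position attention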
| 88 |
'''simple docstring'''
def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) with a single rolling row of Pascal's triangle (O(r) memory)."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
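# Expected output: 252, since C(10, 5) = 10! / (5! * 5!) = 252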
| 88 | 1 |
'''simple docstring'''
def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10000) -> int:
    """Count Lychrel candidates below `limit`: numbers that produce no palindrome
    within 50 reverse-and-add iterations (Project Euler problem 55)."""
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
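# For the default limit of 10000 this returns 249, the accepted Project Euler 55 answer.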
if __name__ == "__main__":
print(f'''{solution() = }''')
| 88 |
'''simple docstring'''
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    """Return True if the two strings are anagrams (case- and whitespace-insensitive)."""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in the input strings,
    # increment the count in the corresponding bucket
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
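# Examples:
#   check_anagrams("Silent", "Listen")                 # -> True
#   check_anagrams("New York Times", "monkeys write")  # -> True
#   check_anagrams("There", "Their")                   # -> False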
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
print(f'''{input_a} and {input_b} are {'' if status else 'not '}anagrams.''')
| 88 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
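# Minimal sampling sketch mirroring the integration test above (step count is
# illustrative):
#   unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
#   pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler())
#   image = pipe(num_inference_steps=50).images[0]  # a 32x32 PIL image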
| 88 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def test_encodings_from_sample_data(self):
        """Assert that encoding fixed sample sentences matches hard-coded ids, and round-trips on decode."""
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, INPUT_SENTENCES)
    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)

                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )
    def test_encodings_from_xnli_dataset(self):
        """Encode one multilingual XNLI sample per language and check that decoding round-trips."""
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)
    def test_pretrained_model_lists(self):
        # The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have
        # any sequence length constraints. This test of the parent class will fail since it relies on the
        # maximum sequence length of the positional embeddings.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
| 88 | 1 |
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D


class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : List[str] =[]
for i in range(self.num_layers ):
lowercase : Union[str, Any] =self.in_channels if i == 0 else self.out_channels
lowercase : Optional[Any] =FlaxResnetBlockaD(
in_channels=UpperCAmelCase__ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCAmelCase__ )
lowercase : int =resnets
if self.add_downsample:
lowercase : Any =FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str]=True ):
'''simple docstring'''
lowercase : str =()
for resnet in self.resnets:
lowercase : str =resnet(UpperCAmelCase__ , UpperCAmelCase__ , deterministic=UpperCAmelCase__ )
output_states += (hidden_states,)
if self.add_downsample:
lowercase : str =self.downsamplers_a(UpperCAmelCase__ )
output_states += (hidden_states,)
return hidden_states, output_states
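# Up block with cross-attention: each layer concatenates a skip connection from the down path, then applies a resnet and a transformer block; an optional upsampler runs last.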
class __SCREAMING_SNAKE_CASE ( nn.Module ):
lowerCamelCase_ = 42
lowerCamelCase_ = 42
lowerCamelCase_ = 42
lowerCamelCase_ = 0.0
lowerCamelCase_ = 1
lowerCamelCase_ = 1
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = jnp.floataa
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : str =[]
lowercase : Union[str, Any] =[]
for i in range(self.num_layers ):
lowercase : Optional[Any] =self.in_channels if (i == self.num_layers - 1) else self.out_channels
lowercase : Optional[int] =self.prev_output_channel if i == 0 else self.out_channels
lowercase : Union[str, Any] =FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCAmelCase__ )
lowercase : Union[str, Any] =FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCAmelCase__ )
lowercase : Optional[int] =resnets
lowercase : Tuple =attentions
if self.add_upsample:
lowercase : List[Any] =FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any]=True ):
'''simple docstring'''
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
lowercase : str =res_hidden_states_tuple[-1]
lowercase : int =res_hidden_states_tuple[:-1]
lowercase : Optional[Any] =jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
lowercase : Dict =resnet(UpperCAmelCase__ , UpperCAmelCase__ , deterministic=UpperCAmelCase__ )
lowercase : List[Any] =attn(UpperCAmelCase__ , UpperCAmelCase__ , deterministic=UpperCAmelCase__ )
if self.add_upsample:
lowercase : str =self.upsamplers_a(UpperCAmelCase__ )
return hidden_states
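# Plain up block: resnets over concatenated skip connections, followed by an optional upsampler.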
class __SCREAMING_SNAKE_CASE ( nn.Module ):
lowerCamelCase_ = 42
lowerCamelCase_ = 42
lowerCamelCase_ = 42
lowerCamelCase_ = 0.0
lowerCamelCase_ = 1
lowerCamelCase_ = True
lowerCamelCase_ = jnp.floataa
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Dict =[]
for i in range(self.num_layers ):
lowercase : List[str] =self.in_channels if (i == self.num_layers - 1) else self.out_channels
lowercase : int =self.prev_output_channel if i == 0 else self.out_channels
lowercase : str =FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCAmelCase__ )
lowercase : List[Any] =resnets
if self.add_upsample:
lowercase : int =FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : str=True ):
'''simple docstring'''
for resnet in self.resnets:
# pop res hidden states
lowercase : Tuple =res_hidden_states_tuple[-1]
lowercase : Optional[int] =res_hidden_states_tuple[:-1]
lowercase : str =jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
lowercase : List[Any] =resnet(UpperCAmelCase__ , UpperCAmelCase__ , deterministic=UpperCAmelCase__ )
if self.add_upsample:
lowercase : Tuple =self.upsamplers_a(UpperCAmelCase__ )
return hidden_states
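# Mid block: one leading resnet followed by alternating attention/resnet pairs.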
class __SCREAMING_SNAKE_CASE ( nn.Module ):
lowerCamelCase_ = 42
lowerCamelCase_ = 0.0
lowerCamelCase_ = 1
lowerCamelCase_ = 1
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = jnp.floataa
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
# there is always at least one resnet
lowercase : Optional[int] =[
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
lowercase : List[Any] =[]
for _ in range(self.num_layers ):
lowercase : Dict =FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCAmelCase__ )
lowercase : List[Any] =FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCAmelCase__ )
lowercase : Tuple =resnets
lowercase : Dict =attentions
def __call__( self : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int=True ):
'''simple docstring'''
lowercase : List[str] =self.resnets[0](UpperCAmelCase__ , UpperCAmelCase__ )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
lowercase : Any =attn(UpperCAmelCase__ , UpperCAmelCase__ , deterministic=UpperCAmelCase__ )
lowercase : Tuple =resnet(UpperCAmelCase__ , UpperCAmelCase__ , deterministic=UpperCAmelCase__ )
return hidden_states
| 88 |
'''simple docstring'''
import math
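# Active power P = S * pf and reactive power Q = S * sqrt(1 - pf**2), where S is the apparent power and pf the power factor.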
def _lowerCAmelCase ( __magic_name__ : float , __magic_name__ : float ) -> float:
if (
not isinstance(__magic_name__ , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
return apparent_power * power_factor
def _lowerCAmelCase ( __magic_name__ : float , __magic_name__ : float ) -> float:
if (
not isinstance(__magic_name__ , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88 | 1 |
'''simple docstring'''
from math import pi, sqrt
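# Gamma for positive integers and half-integers via the recurrence gamma(n) = (n - 1) * gamma(n - 1), with gamma(0.5) = sqrt(pi) as the half-integer base case.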
def _lowerCAmelCase ( __magic_name__ : float ) -> float:
if num <= 0:
raise ValueError('''math domain error''' )
if num > 1_7_1.5:
raise OverflowError('''math range error''' )
elif num - int(__magic_name__ ) not in (0, 0.5):
raise NotImplementedError('''num must be an integer or a half-integer''' )
elif num == 0.5:
return sqrt(__magic_name__ )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def _lowerCAmelCase ( ) -> None:
assert gamma(0.5 ) == sqrt(__magic_name__ )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCamelCase_ = 1.0
while num:
UpperCamelCase_ = float(input("""Gamma of: """))
print(f'''gamma({num}) = {gamma(num)}''')
print("""\nEnter 0 to exit...""")
| 88 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
def __init__( self : Union[str, Any] , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : List[str] ):
'''simple docstring'''
warnings.warn(
'''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use CLIPImageProcessor instead.''' , UpperCAmelCase__ , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
| 88 | 1 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __SCREAMING_SNAKE_CASE :
def __init__( self : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Union[str, Any]=13 , UpperCAmelCase__ : Union[str, Any]=10 , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : List[Any]=2 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Union[str, Any]=32 , UpperCAmelCase__ : str=5 , UpperCAmelCase__ : int=4 , UpperCAmelCase__ : Union[str, Any]=37 , UpperCAmelCase__ : Optional[int]="gelu" , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Tuple=10 , UpperCAmelCase__ : List[Any]=0.02 , UpperCAmelCase__ : Optional[int]="divided_space_time" , UpperCAmelCase__ : Optional[int]=None , ):
'''simple docstring'''
lowercase : int =parent
lowercase : Tuple =batch_size
lowercase : Optional[Any] =image_size
lowercase : Tuple =num_channels
lowercase : List[str] =patch_size
lowercase : str =num_frames
lowercase : Tuple =is_training
lowercase : List[str] =use_labels
lowercase : int =hidden_size
lowercase : Union[str, Any] =num_hidden_layers
lowercase : Optional[int] =num_attention_heads
lowercase : List[str] =intermediate_size
lowercase : List[Any] =hidden_act
lowercase : Any =hidden_dropout_prob
lowercase : Tuple =attention_probs_dropout_prob
lowercase : List[Any] =attention_type
lowercase : Optional[Any] =initializer_range
lowercase : Dict =scope
lowercase : Dict =num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
lowercase : List[str] =(image_size // patch_size) ** 2
lowercase : Union[str, Any] =(num_frames) * self.num_patches_per_frame + 1
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : Optional[Any] =floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowercase : Union[str, Any] =None
if self.use_labels:
lowercase : Union[str, Any] =ids_tensor([self.batch_size] , self.num_labels )
lowercase : int =self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : int =TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
lowercase : int =self.num_labels
return config
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
lowercase : List[Any] =TimesformerModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : int =model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any ):
'''simple docstring'''
lowercase : Union[str, Any] =TimesformerForVideoClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : List[str] =model(UpperCAmelCase__ )
# verify the logits shape
lowercase : Union[str, Any] =torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Tuple =self.prepare_config_and_inputs()
lowercase , lowercase , lowercase : Any =config_and_inputs
lowercase : List[Any] ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
lowerCamelCase_ = (
{'feature-extraction': TimesformerModel, 'video-classification': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : List[Any] =TimesformerModelTester(self )
lowercase : str =ConfigTester(
self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37 )
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict=False ):
'''simple docstring'''
lowercase : int =copy.deepcopy(UpperCAmelCase__ )
if return_labels:
if model_class in get_values(UpperCAmelCase__ ):
lowercase : Optional[Any] =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ )
return inputs_dict
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''TimeSformer does not use inputs_embeds''' )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase , lowercase : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Any =model_class(UpperCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase : Optional[int] =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase__ , nn.Linear ) )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase , lowercase : int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : int =model_class(UpperCAmelCase__ )
lowercase : str =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase : List[str] =[*signature.parameters.keys()]
lowercase : Any =['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*UpperCAmelCase__ )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Optional[Any] =TimesformerModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
if not self.has_attentions:
pass
else:
lowercase , lowercase : Any =self.model_tester.prepare_config_and_inputs_for_common()
lowercase : Optional[Any] =True
for model_class in self.all_model_classes:
lowercase : List[Any] =self.model_tester.seq_length
lowercase : List[Any] =self.model_tester.num_frames
lowercase : Dict =True
lowercase : int =False
lowercase : Optional[Any] =True
lowercase : Optional[int] =model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
lowercase : Any =model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
lowercase : Any =outputs.attentions
self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase : Optional[int] =True
lowercase : Optional[Any] =model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
lowercase : Dict =model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
lowercase : Dict =outputs.attentions
self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
lowercase : Optional[int] =len(UpperCAmelCase__ )
# Check attention is always last and order is fine
lowercase : str =True
lowercase : Union[str, Any] =True
lowercase : Optional[int] =model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
lowercase : Any =model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
self.assertEqual(out_len + 1 , len(UpperCAmelCase__ ) )
lowercase : Union[str, Any] =outputs.attentions
self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
def check_hidden_states_output(UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] ):
lowercase : str =model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
lowercase : Dict =model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
lowercase : Optional[int] =outputs.hidden_states
lowercase : List[str] =self.model_tester.num_hidden_layers + 1
self.assertEqual(len(UpperCAmelCase__ ) , UpperCAmelCase__ )
lowercase : Optional[int] =self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase , lowercase : Any =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Any =True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase : List[Any] =True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def _lowerCAmelCase ( ) -> List[Any]:
lowercase : Union[str, Any] =hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
lowercase : str =np.load(__magic_name__ )
return list(__magic_name__ )
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Optional[int] =TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to(
UpperCAmelCase__ )
lowercase : int =self.default_image_processor
lowercase : Tuple =prepare_video()
lowercase : Any =image_processor(video[:8] , return_tensors='''pt''' ).to(UpperCAmelCase__ )
# forward pass
with torch.no_grad():
lowercase : Optional[int] =model(**UpperCAmelCase__ )
# verify the logits
lowercase : Any =torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase__ )
lowercase : List[str] =torch.tensor([-0.30_16, -0.77_13, -0.42_05] ).to(UpperCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1E-4 ) )
| 88 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
UpperCamelCase_ = parser.parse_args()
if args.model_type == "roberta":
UpperCamelCase_ = RobertaForMaskedLM.from_pretrained(args.model_name)
UpperCamelCase_ = """roberta"""
elif args.model_type == "gpt2":
UpperCamelCase_ = GPTaLMHeadModel.from_pretrained(args.model_name)
UpperCamelCase_ = """transformer"""
UpperCamelCase_ = model.state_dict()
UpperCamelCase_ = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
UpperCamelCase_ = state_dict[f'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
UpperCamelCase_ = f'''{prefix}.embeddings.{w}.weight'''
UpperCamelCase_ = state_dict[param_name]
for w in ["weight", "bias"]:
UpperCamelCase_ = f'''{prefix}.embeddings.LayerNorm.{w}'''
UpperCamelCase_ = state_dict[param_name]
# Transformer Blocks #
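# Copy 6 of the teacher's 12 layers (indices 0, 2, 4, 7, 9, 11) into the student checkpoint.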
UpperCamelCase_ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[
f'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
]
UpperCamelCase_ = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias''']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
]
std_idx += 1
# Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
UpperCamelCase_ = state_dict[f'''{layer}''']
if args.vocab_transform:
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[f'''lm_head.dense.{w}''']
UpperCamelCase_ = state_dict[f'''lm_head.layer_norm.{w}''']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[f'''{prefix}.ln_f.{w}''']
UpperCamelCase_ = state_dict["""lm_head.weight"""]
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
| 88 | 1 |
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
UpperCamelCase_ = HUGGINGFACE_HUB_CACHE
UpperCamelCase_ = """config.json"""
UpperCamelCase_ = """diffusion_pytorch_model.bin"""
UpperCamelCase_ = """diffusion_flax_model.msgpack"""
UpperCamelCase_ = """model.onnx"""
UpperCamelCase_ = """diffusion_pytorch_model.safetensors"""
UpperCamelCase_ = """weights.pb"""
UpperCamelCase_ = """https://huggingface.co"""
UpperCamelCase_ = default_cache_path
UpperCamelCase_ = """diffusers_modules"""
UpperCamelCase_ = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
UpperCamelCase_ = ["""fp16""", """non-ema"""]
UpperCamelCase_ = """.self_attn"""
| 88 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def _lowerCAmelCase ( __magic_name__ : Dict ) -> Dict:
for param in module.parameters():
lowercase : List[str] =False
def _lowerCAmelCase ( ) -> List[str]:
lowercase : Dict ='''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowercase : Optional[int] ='''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def _lowerCAmelCase ( __magic_name__ : Union[str, Any] ) -> str:
lowercase : Optional[int] =plt.imshow(__magic_name__ )
fig.axes.get_xaxis().set_visible(__magic_name__ )
fig.axes.get_yaxis().set_visible(__magic_name__ )
plt.show()
def _lowerCAmelCase ( ) -> List[Any]:
lowercase : Any =datetime.now()
lowercase : Dict =current_time.strftime('''%H:%M:%S''' )
return timestamp
| 88 | 1 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def _lowerCAmelCase ( __magic_name__ : int ) -> List[Any]:
lowercase : str =384
lowercase : Optional[int] =7
if "tiny" in model_name:
lowercase : Optional[Any] =96
lowercase : Optional[int] =(2, 2, 6, 2)
lowercase : int =(3, 6, 12, 24)
elif "small" in model_name:
lowercase : Any =96
lowercase : Dict =(2, 2, 18, 2)
lowercase : List[str] =(3, 6, 12, 24)
elif "base" in model_name:
lowercase : Optional[Any] =128
lowercase : Optional[int] =(2, 2, 18, 2)
lowercase : Optional[Any] =(4, 8, 16, 32)
lowercase : Optional[Any] =12
lowercase : List[Any] =512
elif "large" in model_name:
lowercase : str =192
lowercase : Any =(2, 2, 18, 2)
lowercase : Optional[int] =(6, 12, 24, 48)
lowercase : str =12
lowercase : str =768
# set label information
lowercase : Union[str, Any] =150
lowercase : str ='''huggingface/label-files'''
lowercase : Union[str, Any] ='''ade20k-id2label.json'''
lowercase : Optional[Any] =json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type='''dataset''' ) , '''r''' ) )
lowercase : List[str] ={int(__magic_name__ ): v for k, v in idalabel.items()}
lowercase : Any ={v: k for k, v in idalabel.items()}
lowercase : List[Any] =SwinConfig(
embed_dim=__magic_name__ , depths=__magic_name__ , num_heads=__magic_name__ , window_size=__magic_name__ , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
lowercase : Dict =UperNetConfig(
backbone_config=__magic_name__ , auxiliary_in_channels=__magic_name__ , num_labels=__magic_name__ , idalabel=__magic_name__ , labelaid=__magic_name__ , )
return config
def _lowerCAmelCase ( __magic_name__ : Tuple ) -> Optional[Any]:
lowercase : Optional[int] =[]
# fmt: off
# stem
rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] ) -> int:
lowercase : Union[str, Any] =dct.pop(__magic_name__ )
lowercase : Tuple =val
def _lowerCAmelCase ( __magic_name__ : Optional[Any] , __magic_name__ : str ) -> Optional[int]:
lowercase : Dict =[int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
lowercase : Any =num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
lowercase : Optional[int] =state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
lowercase : str =state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowercase : Dict =in_proj_weight[:dim, :]
lowercase : Any =in_proj_bias[: dim]
lowercase : List[str] =in_proj_weight[
dim : dim * 2, :
]
lowercase : List[Any] =in_proj_bias[
dim : dim * 2
]
lowercase : Dict =in_proj_weight[
-dim :, :
]
lowercase : List[Any] =in_proj_bias[-dim :]
# fmt: on
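# The helpers below reorder the channels of Swin's patch-merging (downsample) weights between the original checkpoint layout and the Hugging Face layout.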
def _lowerCAmelCase ( __magic_name__ : Tuple ) -> List[Any]:
lowercase , lowercase : List[Any] =x.shape
lowercase : Optional[int] =x.reshape(__magic_name__ , 4 , in_channel // 4 )
lowercase : List[str] =x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(__magic_name__ , __magic_name__ )
return x
def _lowerCAmelCase ( __magic_name__ : List[str] ) -> int:
lowercase , lowercase : List[str] =x.shape
lowercase : Tuple =x.reshape(__magic_name__ , in_channel // 4 , 4 )
lowercase : str =x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(__magic_name__ , __magic_name__ )
return x
def _lowerCAmelCase ( __magic_name__ : Tuple ) -> str:
lowercase : Dict =x.shape[0]
lowercase : Any =x.reshape(4 , in_channel // 4 )
lowercase : Union[str, Any] =x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(__magic_name__ )
return x
def _lowerCAmelCase ( __magic_name__ : Any ) -> Any:
lowercase : Any =x.shape[0]
lowercase : str =x.reshape(in_channel // 4 , 4 )
lowercase : Dict =x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(__magic_name__ )
return x
def _lowerCAmelCase ( __magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : int ) -> Union[str, Any]:
lowercase : Union[str, Any] ={
'''upernet-swin-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth''',
'''upernet-swin-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth''',
'''upernet-swin-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth''',
'''upernet-swin-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth''',
}
lowercase : int =model_name_to_url[model_name]
lowercase : Any =torch.hub.load_state_dict_from_url(__magic_name__ , map_location='''cpu''' , file_name=__magic_name__ )[
'''state_dict'''
]
for name, param in state_dict.items():
print(__magic_name__ , param.shape )
lowercase : str =get_upernet_config(__magic_name__ )
lowercase : Optional[Any] =UperNetForSemanticSegmentation(__magic_name__ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
lowercase : Dict =state_dict.pop(__magic_name__ )
if "bn" in key:
lowercase : Optional[Any] =key.replace('''bn''' , '''batch_norm''' )
lowercase : int =val
# rename keys
lowercase : List[Any] =create_rename_keys(__magic_name__ )
for src, dest in rename_keys:
rename_key(__magic_name__ , __magic_name__ , __magic_name__ )
read_in_q_k_v(__magic_name__ , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
lowercase : List[Any] =reverse_correct_unfold_reduction_order(__magic_name__ )
if "norm" in key:
lowercase : str =reverse_correct_unfold_norm_order(__magic_name__ )
model.load_state_dict(__magic_name__ )
# verify on image
lowercase : Union[str, Any] ='''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
lowercase : List[Any] =Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw ).convert('''RGB''' )
lowercase : str =SegformerImageProcessor()
lowercase : List[str] =processor(__magic_name__ , return_tensors='''pt''' ).pixel_values
with torch.no_grad():
lowercase : Tuple =model(__magic_name__ )
lowercase : Union[str, Any] =outputs.logits
print(logits.shape )
print('''First values of logits:''' , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
lowercase : Union[str, Any] =torch.tensor(
[[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] )
elif model_name == "upernet-swin-small":
lowercase : str =torch.tensor(
[[-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.0_9_0_8, -7.0_9_0_8, -6.8_5_3_4]] )
elif model_name == "upernet-swin-base":
lowercase : List[str] =torch.tensor(
[[-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.4_7_6_3, -6.4_7_6_3, -6.3_2_5_4]] )
elif model_name == "upernet-swin-large":
lowercase : Optional[int] =torch.tensor(
[[-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.4_0_4_4, -7.4_0_4_4, -7.2_5_8_6]] )
print('''Logits:''' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , __magic_name__ , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__magic_name__ )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(__magic_name__ )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-swin-tiny""",
type=str,
choices=[f'''upernet-swin-{size}''' for size in ["""tiny""", """small""", """base""", """large"""]],
help="""Name of the Swin + UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
UpperCamelCase_ = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 88 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _lowerCAmelCase ( ) -> List[Any]:
lowercase : Tuple =HfArgumentParser(__magic_name__ )
lowercase : Union[str, Any] =parser.parse_args_into_dataclasses()[0]
lowercase : Any =TensorFlowBenchmark(args=__magic_name__ )
try:
lowercase : List[Any] =parser.parse_args_into_dataclasses()[0]
except ValueError as e:
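# translate deprecated --no_<arg> flags into an error message pointing at the new --no-<arg> spelling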
lowercase : List[Any] ='''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
lowercase : Any =''' '''.join(str(__magic_name__ ).split(''' ''' )[:-1] )
lowercase : Optional[Any] =''''''
lowercase : List[str] =eval(str(__magic_name__ ).split(''' ''' )[-1] )
lowercase : Optional[Any] =[]
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(__magic_name__ )
if len(__magic_name__ ) > 0:
lowercase : int =full_error_msg + begin_error_msg + str(__magic_name__ )
raise ValueError(__magic_name__ )
benchmark.run()
if __name__ == "__main__":
main()
| 88 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class __SCREAMING_SNAKE_CASE :
def __init__( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int]=13 , UpperCAmelCase__ : Union[str, Any]=7 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[Any]=99 , UpperCAmelCase__ : Union[str, Any]=32 , UpperCAmelCase__ : Union[str, Any]=5 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : Tuple=37 , UpperCAmelCase__ : List[Any]="gelu" , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : Union[str, Any]=512 , UpperCAmelCase__ : List[Any]=16 , UpperCAmelCase__ : List[Any]=2 , UpperCAmelCase__ : str=0.02 , UpperCAmelCase__ : str=3 , UpperCAmelCase__ : Optional[Any]=4 , UpperCAmelCase__ : Optional[int]=None , ):
'''simple docstring'''
lowercase : List[str] =parent
lowercase : Tuple =batch_size
lowercase : Any =seq_length
lowercase : str =is_training
lowercase : str =use_token_type_ids
lowercase : int =use_labels
lowercase : List[str] =vocab_size
lowercase : List[str] =hidden_size
lowercase : Optional[int] =num_hidden_layers
lowercase : List[str] =num_attention_heads
lowercase : List[Any] =intermediate_size
lowercase : Union[str, Any] =hidden_act
lowercase : Optional[Any] =hidden_dropout_prob
lowercase : Optional[Any] =attention_probs_dropout_prob
lowercase : int =max_position_embeddings
lowercase : Optional[int] =type_vocab_size
lowercase : Any =type_sequence_label_size
lowercase : Tuple =initializer_range
lowercase : Tuple =num_labels
lowercase : Union[str, Any] =num_choices
lowercase : Any =scope
lowercase : Tuple =self.vocab_size - 1
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : Dict =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Union[str, Any] =None
if self.use_token_type_ids:
lowercase : Dict =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : str =None
lowercase : str =None
lowercase : Optional[int] =None
if self.use_labels:
lowercase : Union[str, Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : str =ids_tensor([self.batch_size] , self.num_choices )
lowercase : Dict =OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
lowercase : Optional[int] =ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple , *UpperCAmelCase__ : Dict ):
'''simple docstring'''
lowercase : Optional[Any] =OpenAIGPTModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : List[Any] =model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , head_mask=UpperCAmelCase__ )
lowercase : Dict =model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
lowercase : Optional[int] =model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , *UpperCAmelCase__ : List[Any] ):
'''simple docstring'''
lowercase : Tuple =OpenAIGPTLMHeadModel(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : Tuple =model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , *UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase : str =OpenAIGPTDoubleHeadsModel(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : Any =model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , *UpperCAmelCase__ : Any ):
'''simple docstring'''
lowercase : Dict =self.num_labels
lowercase : Union[str, Any] =OpenAIGPTForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : Any =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Dict =model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : str =self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase : Dict =config_and_inputs
lowercase : Optional[int] ={
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowerCamelCase_ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowerCamelCase_ = (
{
'feature-extraction': OpenAIGPTModel,
'text-classification': OpenAIGPTForSequenceClassification,
'text-generation': OpenAIGPTLMHeadModel,
'zero-shot': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict=False ):
'''simple docstring'''
lowercase : str =super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
lowercase : List[str] =torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=UpperCAmelCase__ , )
lowercase : List[Any] =inputs_dict['''labels''']
lowercase : Optional[int] =inputs_dict['''labels''']
lowercase : Union[str, Any] =torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=UpperCAmelCase__ , )
lowercase : str =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ )
return inputs_dict
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : Optional[int] =OpenAIGPTModelTester(self )
lowercase : str =ConfigTester(self , config_class=UpperCAmelCase__ , n_embd=37 )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*UpperCAmelCase__ )
@slow
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Optional[Any] =OpenAIGPTModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : int =OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(UpperCAmelCase__ )
lowercase : Optional[int] =torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=UpperCAmelCase__ ) # the president is
lowercase : Optional[int] =[
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
lowercase : Optional[int] =model.generate(UpperCAmelCase__ , do_sample=UpperCAmelCase__ )
self.assertListEqual(output_ids[0].tolist() , UpperCAmelCase__ )
| 88 |
'''simple docstring'''
from __future__ import annotations
def _lowerCAmelCase ( __magic_name__ : list[list[int]] ) -> bool:
lowercase : str =len(__magic_name__ )
# We need to create solution object to save path.
lowercase : int =[[0 for _ in range(__magic_name__ )] for _ in range(__magic_name__ )]
lowercase : List[Any] =run_maze(__magic_name__ , 0 , 0 , __magic_name__ )
if solved:
print('''\n'''.join(str(__magic_name__ ) for row in solutions ) )
else:
print('''No solution exists!''' )
return solved
def _lowerCAmelCase ( __magic_name__ : list[list[int]] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : list[list[int]] ) -> bool:
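# Depth-first backtracking: mark the current cell, recurse into the four neighbours, and unmark the cell on dead ends.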
lowercase : Optional[int] =len(__magic_name__ )
# Final check point.
if i == j == (size - 1):
lowercase : Optional[int] =1
return True
lowercase : Optional[int] =(not i < 0) and (not j < 0) # Check lower bounds
lowercase : Tuple =(i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
lowercase : Union[str, Any] =(not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
lowercase : Union[str, Any] =1
# check for directions
if (
run_maze(__magic_name__ , i + 1 , __magic_name__ , __magic_name__ )
or run_maze(__magic_name__ , __magic_name__ , j + 1 , __magic_name__ )
or run_maze(__magic_name__ , i - 1 , __magic_name__ , __magic_name__ )
or run_maze(__magic_name__ , __magic_name__ , j - 1 , __magic_name__ )
):
return True
lowercase : str =0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88 | 1 |
'''simple docstring'''
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
UpperCamelCase_ = TypeVar("""T""")
def _lowerCAmelCase ( __magic_name__ : int ) -> int:
return (position - 1) // 2
def _lowerCAmelCase ( __magic_name__ : int ) -> int:
return (2 * position) + 1
def _lowerCAmelCase ( __magic_name__ : int ) -> int:
return (2 * position) + 2
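# Array-backed binary min-heap with a position map, so update_key (decrease/increase key) runs in O(log n).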
class __SCREAMING_SNAKE_CASE ( Generic[T] ):
def __init__( self : Optional[int] ):
'''simple docstring'''
lowercase : list[tuple[T, int]] =[]
lowercase : dict[T, int] ={}
lowercase : int =0
def __len__( self : List[Any] ):
'''simple docstring'''
return self.elements
def __repr__( self : List[str] ):
'''simple docstring'''
return str(self.heap )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
# Check if the priority queue is empty
return self.elements == 0
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : T , UpperCAmelCase__ : int ):
'''simple docstring'''
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
lowercase : Any =self.elements
self.elements += 1
self._bubble_up(UpperCAmelCase__ )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
lowercase , lowercase : List[Any] =self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
lowercase , lowercase : Optional[int] =self.heap[0]
self._bubble_down(UpperCAmelCase__ )
return elem
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : T , UpperCAmelCase__ : int ):
'''simple docstring'''
# Update the weight of the given key
lowercase : str =self.position_map[elem]
lowercase : Any =(elem, weight)
if position > 0:
lowercase : str =get_parent_position(UpperCAmelCase__ )
lowercase , lowercase : List[Any] =self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(UpperCAmelCase__ )
else:
self._bubble_down(UpperCAmelCase__ )
else:
self._bubble_down(UpperCAmelCase__ )
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : T ):
'''simple docstring'''
# Place a node at the proper position (upward movement) [to be used internally
# only]
lowercase : Optional[int] =self.position_map[elem]
if curr_pos == 0:
return None
lowercase : Optional[int] =get_parent_position(UpperCAmelCase__ )
lowercase , lowercase : str =self.heap[curr_pos]
lowercase , lowercase : List[str] =self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(UpperCAmelCase__ , UpperCAmelCase__ )
return self._bubble_up(UpperCAmelCase__ )
return None
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : T ):
'''simple docstring'''
# Place a node at the proper position (downward movement) [to be used
# internally only]
lowercase : List[str] =self.position_map[elem]
lowercase , lowercase : Tuple =self.heap[curr_pos]
lowercase : Union[str, Any] =get_child_left_position(UpperCAmelCase__ )
lowercase : str =get_child_right_position(UpperCAmelCase__ )
if child_left_position < self.elements and child_right_position < self.elements:
lowercase , lowercase : int =self.heap[child_left_position]
lowercase , lowercase : int =self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(UpperCAmelCase__ , UpperCAmelCase__ )
return self._bubble_down(UpperCAmelCase__ )
if child_left_position < self.elements:
lowercase , lowercase : int =self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(UpperCAmelCase__ , UpperCAmelCase__ )
return self._bubble_down(UpperCAmelCase__ )
else:
return None
if child_right_position < self.elements:
lowercase , lowercase : Tuple =self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(UpperCAmelCase__ , UpperCAmelCase__ )
return self._bubble_down(UpperCAmelCase__ )
return None
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : int ):
'''simple docstring'''
# Swap the nodes at the given positions
lowercase : Optional[Any] =self.heap[nodea_pos][0]
lowercase : str =self.heap[nodea_pos][0]
lowercase , lowercase : int =(
self.heap[nodea_pos],
self.heap[nodea_pos],
)
lowercase : Dict =nodea_pos
lowercase : Any =nodea_pos
class __SCREAMING_SNAKE_CASE ( Generic[T] ):
def __init__( self : Optional[int] ):
'''simple docstring'''
lowercase : dict[T, dict[T, int]] ={}
lowercase : int =0
def __repr__( self : Dict ):
'''simple docstring'''
return str(self.connections )
def __len__( self : int ):
'''simple docstring'''
return self.nodes
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : T ):
'''simple docstring'''
# Add a node in the graph if it is not in the graph
if node not in self.connections:
lowercase : Any ={}
self.nodes += 1
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : T , UpperCAmelCase__ : T , UpperCAmelCase__ : int ):
'''simple docstring'''
# Add an edge between 2 nodes in the graph
self.add_node(UpperCAmelCase__ )
self.add_node(UpperCAmelCase__ )
lowercase : Optional[Any] =weight
lowercase : List[Any] =weight
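# Prim's algorithm: repeatedly extract the closest node from the min-priority queue and relax its neighbours' keys.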
def _lowerCAmelCase ( __magic_name__ : GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]:
lowercase : dict[T, int] ={node: maxsize for node in graph.connections}
lowercase : dict[T, T | None] ={node: None for node in graph.connections}
lowercase : MinPriorityQueue[T] =MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(__magic_name__ , __magic_name__ )
if priority_queue.is_empty():
return dist, parent
# initialization
lowercase : Optional[int] =priority_queue.extract_min()
lowercase : Optional[Any] =0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
lowercase : Any =dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(__magic_name__ , dist[neighbour] )
lowercase : List[str] =node
# running prim's algorithm
while not priority_queue.is_empty():
lowercase : str =priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
lowercase : Any =dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(__magic_name__ , dist[neighbour] )
lowercase : Tuple =node
return dist, parent
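A minimal usage sketch (the MinPriorityQueue methods used by prims_algo — push, extract_min, update_key, is_empty — are defined earlier in this snippet; the tiny graph and expected output below are illustrative only):

if __name__ == "__main__":
    g: GraphUndirectedWeighted[str] = GraphUndirectedWeighted()
    g.add_edge("a", "b", 3)
    g.add_edge("b", "c", 10)
    g.add_edge("a", "c", 15)
    dist, parent = prims_algo(g)
    # parent maps each vertex to its predecessor in the spanning structure,
    # e.g. {'a': None, 'b': 'a', 'c': 'b'} for this graph
    print(dist, parent)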
| 88 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
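A hedged usage sketch (the checkpoint name is illustrative; any unconditional UNet trained for DDPM/DDIM sampling should work):

# pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
# images = pipe(batch_size=4, num_inference_steps=50, eta=0.0).images
# images[0].save("sample.png")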
| 88 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_graphormer""": ["""GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GraphormerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
"""GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GraphormerForGraphClassification""",
"""GraphormerModel""",
"""GraphormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 88 |
'''simple docstring'''
import argparse
import copy
def generate_neighbours(path):
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours


def generate_first_solution(path, dict_of_neighbours):
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []

    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0

            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
    parser.add_argument(
        "-f",
        "--File",
        type=str,
        help="Path to the file containing the data",
        required=True,
    )
    parser.add_argument(
        "-i",
        "--Iterations",
        type=int,
        help="How many iterations the algorithm should perform",
        required=True,
    )
    parser.add_argument(
        "-s", "--Size", type=int, help="Size of the tabu list", required=True
    )

    # Pass the arguments to main method
    main(parser.parse_args())
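A hypothetical invocation (the data file lists one undirected edge per line as "node_a node_b distance"; the file name is illustrative):

#   python tabu_search.py -f tabudata2.txt -i 4 -s 3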
| 88 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
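A minimal instantiation sketch (override values are illustrative; the defaults above should correspond to the base checkpoints listed in the archive map):

config = BigBirdConfig(attention_type="original_full", block_size=64)
print(config.hidden_size, config.attention_type)  # 768 original_full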
| 88 |
'''simple docstring'''
def solution(limit: int = 1000000) -> int:
    # Sieve of Eratosthenes over the odd numbers
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    # Euler's product formula: phi(n) = n * prod(1 - 1/p) over primes p | n
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
if __name__ == "__main__":
print(f'''{solution() = }''')
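This appears to be the classic totient-summation task (Project Euler 72 counts reduced proper fractions with denominators up to the limit); a brute-force cross-check for small limits, as a sketch:

from math import gcd

def brute_force(limit: int) -> int:
    # Count reduced proper fractions n/d with 0 < n < d <= limit,
    # which equals sum(phi(d) for d in 2..limit).
    return sum(1 for d in range(2, limit + 1) for n in range(1, d) if gcd(n, d) == 1)

assert brute_force(8) == 21  # solution(8) should agree, up to floating-point rounding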
| 88 | 1 |
'''simple docstring'''
UpperCamelCase_ = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 88 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
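A hypothetical way to exercise just this module from a transformers checkout (the path is assumed from the repository layout):

# python -m pytest tests/models/biogpt/test_tokenization_biogpt.py -k test_full_tokenizer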
| 88 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # Insert in descending order so that repeated prepending yields an ascending list
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 88 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 88 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""LlamaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""LlamaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
"""LlamaForCausalLM""",
"""LlamaModel""",
"""LlamaPreTrainedModel""",
"""LlamaForSequenceClassification""",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 88 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class FFT:
    def __init__(self, poly_a=None, poly_b=None):
        # Input as coefficient lists, lowest degree first
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    # Discrete fourier transform of A or B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        # Iterative butterfly passes
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root ** next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    # Multiply the DFTs of A and B pointwise, then invert to recover A*B
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2

        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove trailing 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    # Overwrites __str__ for print(); shows A, B and A*B
    def __str__(self):
        # note: enumerate yields (index, value), so unpack as (i, coef)
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product)
        )

        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
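A small usage sketch (illustrative inputs; by hand, (1 + 2x + 3x^2)(4 + 5x + 6x^2) = 4 + 13x + 28x^2 + 27x^3 + 18x^4):

print(FFT(poly_a=[1, 2, 3], poly_b=[4, 5, 6]))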
| 88 | 1 |
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE


def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config",
        type=str,
        default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size",
        type=int,
        default=8,
        help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu",
        action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name",
        type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
        default="local",
    )
    parser.add_argument(
        "--tpu_zone",
        type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16",
        action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset",
        type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size",
        type=int,
        default=2**18,
        help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset",
        type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate",
        type=float,
        default=1e-3,
        help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability",
        type=float,
        default=0.15,
        help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")

    args = parser.parse_args()
    return args


def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu


def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples


def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(records))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(args.shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset


def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")

    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)

    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)


if __name__ == "__main__":
    args = parse_args()
    main(args)
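A hypothetical invocation (dataset paths, tokenizer name, and output directory are illustrative; `gs://` bucket paths also work):

#   python run_mlm.py \
#     --tokenizer unigram-tokenizer-wikitext \
#     --train_dataset gs://my-bucket/train \
#     --eval_dataset gs://my-bucket/eval \
#     --num_epochs 1 \
#     --output_dir ./mlm-checkpoints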
| 88 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {"""configuration_plbart""": ["""PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PLBartConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""PLBartTokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
"""PLBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PLBartForCausalLM""",
"""PLBartForConditionalGeneration""",
"""PLBartForSequenceClassification""",
"""PLBartModel""",
"""PLBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 88 | 1 |
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(patience_sort(unsorted))
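A brief aside, as a sketch: the number of piles formed in the first phase equals the length of a longest increasing subsequence of the input — the classic patience-sorting property.

print(patience_sort([5, 1, 4, 2, 3]))  # [1, 2, 3, 4, 5]; three piles form here, matching an LIS length of 3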
| 88 |
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuraton of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()

        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig):
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ):
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
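A short usage sketch of the composite config (checkpoint names are illustrative):

# encoder_config = AutoConfig.from_pretrained("google/vit-base-patch16-224-in21k")
# decoder_config = AutoConfig.from_pretrained("gpt2")
# config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
# config.decoder.is_decoder is now True and cross-attention is enabled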
| 88 | 1 |
'''simple docstring'''
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        # Grow a minimum spanning tree from the smallest-numbered vertex
        subgraph: Graph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight

            subgraph.add_edge(min_edge, min_weight)

        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    data: list[str]
    edge1: int
    edge2: int

    with open(network_file) as f:
        data = f.read().strip().split("\n")

    adjacency_matrix = [line.split(",") for line in data]

    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)

    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total
if __name__ == "__main__":
print(f'''{solution() = }''')
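As a sanity check (figures quoted from the Project Euler 107 statement, so treat them as an assumption here): the 7-vertex example network has total weight 243 and a minimal connected subnetwork of weight 93, for a maximum saving of 150.

# Hypothetical check, with the example network saved as "test_network.txt"
# in the same CSV format ("-" marks a missing edge):
# assert solution("test_network.txt") == 150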
| 88 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_whisper""": ["""WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WhisperConfig""", """WhisperOnnxConfig"""],
"""feature_extraction_whisper""": ["""WhisperFeatureExtractor"""],
"""processing_whisper""": ["""WhisperProcessor"""],
"""tokenization_whisper""": ["""WhisperTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""WhisperTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
"""WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WhisperForConditionalGeneration""",
"""WhisperModel""",
"""WhisperPreTrainedModel""",
"""WhisperForAudioClassification""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
"""TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWhisperForConditionalGeneration""",
"""TFWhisperModel""",
"""TFWhisperPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
"""FlaxWhisperForConditionalGeneration""",
"""FlaxWhisperModel""",
"""FlaxWhisperPreTrainedModel""",
"""FlaxWhisperForAudioClassification""",
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
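# Minimal demonstration of the lazy-module pattern above (assuming transformers is
# installed): attribute access on the package triggers the submodule import, so plain
# `import transformers` stays cheap and torch/TF/flax are only pulled in when used.
import importlib
def lazy_import_demo():
    transformers = importlib.import_module("transformers")
    cfg_cls = getattr(transformers, "WhisperConfig")  # loads configuration_whisper on demand
    return cfg_cls()  # a default Whisper config; no modeling backend imported yet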
| 88 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
UpperCamelCase_ = """3"""
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 88 | 1 |
'''simple docstring'''
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
# Entry-point names below follow the upstream transformers hubconf.py; they were
# mangled in this dump and are reconstructed here.
dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
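# Usage sketch for the torch.hub entry points above; the repo string and model id are
# illustrative and assume a network connection.
# import torch
# tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
# mdl = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")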
| 88 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = """▁"""
UpperCamelCase_ = {"""vocab_file""": """sentencepiece.bpe.model"""}
UpperCamelCase_ = {
"""vocab_file""": {
"""facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""",
}
}
UpperCamelCase_ = {
"""facebook/xglm-564M""": 2048,
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = VOCAB_FILES_NAMES
lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any]="<s>" , UpperCAmelCase__ : int="</s>" , UpperCAmelCase__ : Optional[Any]="</s>" , UpperCAmelCase__ : Optional[Any]="<s>" , UpperCAmelCase__ : Any="<unk>" , UpperCAmelCase__ : Any="<pad>" , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , **UpperCAmelCase__ : Optional[int] , ):
'''simple docstring'''
lowercase : int ={} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
lowercase : Optional[Any] =7
lowercase : Optional[int] =[F'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
lowercase : List[Any] =kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , )
lowercase : List[str] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCAmelCase__ ) )
lowercase : List[Any] =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase : Union[str, Any] =1
# Mimic fairseq token-to-id alignment for the first 4 token
lowercase : List[str] ={'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
lowercase : str =len(self.sp_model )
lowercase : List[Any] ={F'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(UpperCAmelCase__ )
lowercase : int ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : int ):
'''simple docstring'''
lowercase : Optional[int] =self.__dict__.copy()
lowercase : List[Any] =None
lowercase : Tuple =self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Optional[Any] , UpperCAmelCase__ : Any ):
'''simple docstring'''
lowercase : int =d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase : Optional[int] ={}
lowercase : List[Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
lowercase : List[Any] =[self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase__ ))
return [1] + ([0] * len(UpperCAmelCase__ )) + [1, 1] + ([0] * len(UpperCAmelCase__ ))
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
lowercase : int =[self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : int ={self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : str ):
'''simple docstring'''
return self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ )
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase : List[str] =self.sp_model.PieceToId(UpperCAmelCase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Any ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
        out_string = "".join(UpperCAmelCase__).replace("▁", " ").strip()  # "▁" is SPIECE_UNDERLINE; the replace target was garbled in the dump
return out_string
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase : Dict =os.path.join(
UpperCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase__ , '''wb''' ) as fi:
lowercase : Optional[int] =self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase__ )
return (out_vocab_file,)
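# Worked example of the fairseq/spm id alignment implemented above: with
# fairseq_offset = 1 every real sentencepiece id shifts up by one, and spm id 0 (the
# spm <unk>) falls back to the pinned unk_token_id. This helper just mirrors
# _convert_token_to_id for illustration.
def spm_to_fairseq_id(spm_id: int, fairseq_offset: int = 1, unk_token_id: int = 3) -> int:
    return spm_id + fairseq_offset if spm_id else unk_token_id
assert spm_to_fairseq_id(3) == 4  # spm "," (id 3) -> fairseq "," (id 4)
assert spm_to_fairseq_id(0) == 3  # unknown piece -> unk_token_id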
| 88 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = """▁"""
UpperCamelCase_ = {"""vocab_file""": """sentencepiece.bpe.model"""}
UpperCamelCase_ = {
"""vocab_file""": {
"""facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""",
}
}
UpperCamelCase_ = {
"""facebook/xglm-564M""": 2048,
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = VOCAB_FILES_NAMES
lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any]="<s>" , UpperCAmelCase__ : int="</s>" , UpperCAmelCase__ : Optional[Any]="</s>" , UpperCAmelCase__ : Optional[Any]="<s>" , UpperCAmelCase__ : Any="<unk>" , UpperCAmelCase__ : Any="<pad>" , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , **UpperCAmelCase__ : Optional[int] , ):
'''simple docstring'''
lowercase : int ={} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
lowercase : Optional[Any] =7
lowercase : Optional[int] =[F'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
lowercase : List[Any] =kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , )
lowercase : List[str] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCAmelCase__ ) )
lowercase : List[Any] =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase : Union[str, Any] =1
# Mimic fairseq token-to-id alignment for the first 4 token
lowercase : List[str] ={'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
lowercase : str =len(self.sp_model )
lowercase : List[Any] ={F'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(UpperCAmelCase__ )
lowercase : int ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : int ):
'''simple docstring'''
lowercase : Optional[int] =self.__dict__.copy()
lowercase : List[Any] =None
lowercase : Tuple =self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Optional[Any] , UpperCAmelCase__ : Any ):
'''simple docstring'''
lowercase : int =d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase : Optional[int] ={}
lowercase : List[Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
lowercase : List[Any] =[self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase__ ))
return [1] + ([0] * len(UpperCAmelCase__ )) + [1, 1] + ([0] * len(UpperCAmelCase__ ))
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
lowercase : int =[self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : int ={self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : str ):
'''simple docstring'''
return self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ )
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase : List[str] =self.sp_model.PieceToId(UpperCAmelCase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Any ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
        out_string = "".join(UpperCAmelCase__).replace("▁", " ").strip()  # "▁" is SPIECE_UNDERLINE; the replace target was garbled in the dump
return out_string
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase : Dict =os.path.join(
UpperCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase__ , '''wb''' ) as fi:
lowercase : Optional[int] =self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase__ )
return (out_vocab_file,)
| 88 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    # Function and variable names are restored from the call site and the preserved
    # right-hand-side references; the dump had mangled them.
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()  # identifier reconstructed; the original name was lost in the dump
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)  # dtype digits were garbled in the dump; fp16 assumed
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequential with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description="""model converter.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("""--tf_model_dir""", metavar="""PATH""", type=str, required=True, help="""import model""")
parser.add_argument("""--output""", metavar="""PATH""", type=str, required=True, help="""output model""")
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
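# Usage sketch for the converter above (paths are placeholders):
#   python convert_gptsan_tf_checkpoint_to_pytorch.py --tf_model_dir /path/to/tf_ckpt --output gptsan.pt
# A quick sanity check on the resulting state dict could look like this:
import torch
def inspect_converted(path: str) -> None:
    state = torch.load(path, map_location="cpu")
    print(len(state), "tensors converted")
    for name, tensor in list(state.items())[:5]:
        print(name, tuple(tensor.shape))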
| 88 | 1 |
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration
REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None
UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"
REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
# Decorator names below are reconstructed from the upstream datasets test module; the
# dump had replaced them with placeholders.
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)
    return wrapper
def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)
    return wrapper
def skip_on_windows(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)
    return wrapper
def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows
)
@local
class LocalMetricTest(parameterized.TestCase):
    # Class, attribute, and method names are reconstructed from preserved call sites
    # (e.g. the @LocalMetricTest.register_intensive_calls_patcher uses further down).
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        _ = "[...]"  # this literal was elided in the source dump; kept verbatim
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
    @slow
    def test_load_real_metric(self, metric_name):
        _ = "[...]"  # elided in the source dump; kept verbatim
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield
    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)
        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield
    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher
        return wrapper
# Patcher function names below are reconstructed; the registered metric names and
# patched import paths come from the original lines.
@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor
    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags
    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])
    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch
    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))
    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)
        return Model()
    # mock download_model and load_from_checkpoint, which would otherwise fetch a model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
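# Sketch of how another metric could hook into the patcher registry above; the metric
# name and the patched path are hypothetical, shown commented out because registration
# mutates LocalMetricTest.INTENSIVE_CALLS_PATCHER at import time.
# @LocalMetricTest.register_intensive_calls_patcher("my_metric")
# def patch_my_metric(module_name):
#     with patch("my_metric.heavy_forward_pass") as mock_forward:
#         mock_forward.return_value = 0.5
#         yield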
| 700 |
'''simple docstring'''
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ = """▁"""
UpperCamelCase_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = BigBirdTokenizer
lowerCamelCase_ = BigBirdTokenizerFast
lowerCamelCase_ = True
lowerCamelCase_ = True
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
super().setUp()
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1004)
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382], )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
        symbols = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
# fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )
        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)
        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")
@slow
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
# fmt: off
lowercase : str ={'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 88 | 0 |
'''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
UpperCamelCase_ = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    # Class and method names reconstructed: the pad-to-multiple-of-8 behaviour with
    # symmetric padding matches the Swin2SR image processor; the dump mangled the names.
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size
    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)
    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
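# Worked example of the pad() arithmetic above: dimensions are padded up to the next
# multiple of `size`, and an exact multiple still gains a full extra block because of
# the `// size + 1`. Standalone helper for illustration only.
def pad_amount(old: int, size: int = 8) -> int:
    return (old // size + 1) * size - old
assert pad_amount(21) == 3   # 21 -> 24
assert pad_amount(17) == 7   # 17 -> 24
assert pad_amount(16) == 8   # already a multiple of 8, still padded to 24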
| 701 |
'''simple docstring'''
def binomial_coefficient(n: int, r: int) -> int:
    # Compute C(n, r) with a single Pascal's-triangle row, using O(r) space.
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
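# Cross-check of the Pascal's-row DP above against the standard library
# (math.comb, Python 3.8+):
import math
for n in range(12):
    for r in range(n + 1):
        assert binomial_coefficient(n, r) == math.comb(n, r)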
| 88 | 0 |
'''simple docstring'''
def count_divisors(n: int) -> int:
    # Number of divisors via prime factorisation: multiply (multiplicity + 1) per prime.
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors
def solution() -> int:
    # Project Euler 12: first triangle number with over five hundred divisors.
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution())
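# Sanity checks for the helpers above: 28 = 2^2 * 7 has (2 + 1) * (1 + 1) = 6 divisors,
# and the multiplicity-product formula also handles 1 and primes correctly.
assert count_divisors(1) == 1
assert count_divisors(7) == 2
assert count_divisors(28) == 6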
| 702 |
'''simple docstring'''
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()
    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")
    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False
    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)
    # For each character in input strings,
    # increment count in the corresponding
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values())
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCamelCase_ = input("""Enter the first string """).strip()
UpperCamelCase_ = input("""Enter the second string """).strip()
UpperCamelCase_ = check_anagrams(input_a, input_b)
print(f'''{input_a} and {input_b} are {'' if status else 'not '}anagrams.''')
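# Quick usage checks for check_anagrams above (case and inner spaces are ignored):
assert check_anagrams("Silent", "Listen") is True
assert check_anagrams("This is a string", "Is this a string") is True
assert check_anagrams("There", "Their") is False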
| 88 | 0 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
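# Usage sketch for the pipeline exported above; the checkpoint id is the published
# UniDiffuser weights, but treat the exact calls as illustrative rather than
# authoritative (see the diffusers docs for the real mode-setting API).
# from diffusers import UniDiffuserPipeline
# pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1")
# pipe.set_joint_mode()  # sample (image, text) jointly; text/image modes also exist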
| 703 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = None
lowerCamelCase_ = BloomTokenizerFast
lowerCamelCase_ = BloomTokenizerFast
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = 'tokenizer_file'
lowerCamelCase_ = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'}
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self : Union[str, Any] , **UpperCAmelCase__ : Any ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
        tokenizer = self.get_rust_tokenizer()
        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)
        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(INPUT_SENTENCES, decoded_tokens)
    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)
                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")
                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)
        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())
        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
# The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have
# any sequence length constraints. This test of the parent class will fail since it relies on the
# maximum sequence length of the positoonal embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 88 | 0 |
'''simple docstring'''
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def base64_encode(data: bytes) -> bytes:
    """Encodes `data` according to RFC4648 (function names reconstructed)."""
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    """Decodes `encoded_data` according to RFC4648."""
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")
    padding = encoded_data.count("=")
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )
    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(decoded_data)
if __name__ == "__main__":
import doctest
doctest.testmod()
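A quick round-trip check of the two functions above (names follow the cleaned-up definitions; the standard library serves as a cross-check):

message = b"Hello"
encoded = base64_encode(message)
assert encoded == b"SGVsbG8="          # RFC 4648 encoding of b"Hello"
import base64 as stdlib_base64
assert encoded == stdlib_base64.b64encode(message)  # agrees with the stdlib
assert base64_decode(encoded) == message            # decoding round-trips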
| 704 |
'''simple docstring'''
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
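A short numeric check (function names as in the cleaned-up definitions above): at 100 VA apparent power and power factor 0.9, real power is 90 W and reactive power is 100 * sqrt(1 - 0.9**2), roughly 43.59 var:

print(real_power(100, 0.9))      # 90.0
print(reactive_power(100, 0.9))  # 43.58898943540674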
| 88 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 705 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
def __init__( self : Union[str, Any] , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : List[str] ):
'''simple docstring'''
warnings.warn(
'''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use CLIPImageProcessor instead.''' , UpperCAmelCase__ , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
| 88 | 0 |
'''simple docstring'''
from typing import Any
def mode(input_list: list) -> list[Any]:
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})


if __name__ == "__main__":
    import doctest

    doctest.testmod()
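Illustrative calls (all tied values are returned, in sorted order):

print(mode([2, 3, 4, 5, 3, 4, 2, 5, 2, 2, 4, 2, 2, 2]))  # [2]
print(mode([1, 2, 2, 3, 3]))                              # [2, 3], bimodal input
print(mode([]))                                           # []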
| 706 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
UpperCamelCase_ = parser.parse_args()
if args.model_type == "roberta":
UpperCamelCase_ = RobertaForMaskedLM.from_pretrained(args.model_name)
UpperCamelCase_ = """roberta"""
elif args.model_type == "gpt2":
UpperCamelCase_ = GPTaLMHeadModel.from_pretrained(args.model_name)
UpperCamelCase_ = """transformer"""
UpperCamelCase_ = model.state_dict()
UpperCamelCase_ = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
UpperCamelCase_ = state_dict[f'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
UpperCamelCase_ = f'''{prefix}.embeddings.{w}.weight'''
UpperCamelCase_ = state_dict[param_name]
for w in ["weight", "bias"]:
UpperCamelCase_ = f'''{prefix}.embeddings.LayerNorm.{w}'''
UpperCamelCase_ = state_dict[param_name]
# Transformer Blocks #
UpperCamelCase_ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[
f'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
]
UpperCamelCase_ = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias''']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
]
std_idx += 1
# Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
UpperCamelCase_ = state_dict[f'''{layer}''']
if args.vocab_transform:
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[f'''lm_head.dense.{w}''']
UpperCamelCase_ = state_dict[f'''lm_head.layer_norm.{w}''']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[f'''{prefix}.ln_f.{w}''']
UpperCamelCase_ = state_dict["""lm_head.weight"""]
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
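For reference, an illustrative invocation of this extraction script (the script filename is assumed; the flags and defaults come from the argparse setup above):

# python extract.py --model_type roberta --model_name roberta-large \
#     --dump_checkpoint serialization_dir/tf_roberta_048131723.pth --vocab_transform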
| 88 | 0 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = [
("bert.bert", "visual_bert"),
("bert.cls", "cls"),
("bert.classifier", "cls"),
("token_type_embeddings_visual", "visual_token_type_embeddings"),
("position_embeddings_visual", "visual_position_embeddings"),
("projection", "visual_projection"),
]
UpperCamelCase_ = [
"nlvr2_coco_pre_trained.th",
"nlvr2_fine_tuned.th",
"nlvr2_pre_trained.th",
"vcr_coco_pre_train.th",
"vcr_fine_tune.th",
"vcr_pre_train.th",
"vqa_coco_pre_trained.th",
"vqa_fine_tuned.th",
"vqa_pre_trained.th",
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
assert (
checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS
), f'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
lowercase : Any ='pretraining'
if "vcr" in checkpoint_path:
lowercase : Dict ={'visual_embedding_dim': 512}
elif "vqa_advanced" in checkpoint_path:
lowercase : Tuple ={'visual_embedding_dim': 2048}
elif "vqa" in checkpoint_path:
lowercase : List[Any] ={'visual_embedding_dim': 2048}
elif "nlvr" in checkpoint_path:
lowercase : Optional[int] ={'visual_embedding_dim': 1024}
else:
raise NotImplementedError(f'''No implementation found for `{checkpoint_path}`.''' )
else:
if "vcr" in checkpoint_path:
lowercase : Tuple ={'visual_embedding_dim': 512}
lowercase : List[Any] ='multichoice'
elif "vqa_advanced" in checkpoint_path:
lowercase : Tuple ={'visual_embedding_dim': 2048}
lowercase : str ='vqa_advanced'
elif "vqa" in checkpoint_path:
lowercase : List[str] ={'visual_embedding_dim': 2048, 'num_labels': 3129}
lowercase : Optional[Any] ='vqa'
elif "nlvr" in checkpoint_path:
lowercase : int ={
'visual_embedding_dim': 1024,
'num_labels': 2,
}
lowercase : List[Any] ='nlvr'
lowercase : Any =VisualBertConfig(**__magic_name__ )
# Load State Dict
lowercase : Any =load_state_dict(__magic_name__ )
lowercase : Optional[Any] =get_new_dict(__magic_name__ , __magic_name__ )
if model_type == "pretraining":
lowercase : Union[str, Any] =VisualBertForPreTraining(__magic_name__ )
elif model_type == "vqa":
lowercase : int =VisualBertForQuestionAnswering(__magic_name__ )
elif model_type == "nlvr":
lowercase : Union[str, Any] =VisualBertForVisualReasoning(__magic_name__ )
elif model_type == "multichoice":
lowercase : List[Any] =VisualBertForMultipleChoice(__magic_name__ )
model.load_state_dict(__magic_name__ )
# Save Checkpoints
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
model.save_pretrained(__magic_name__ )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""")
UpperCamelCase_ = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 707 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module):
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_pil(img):
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
| 88 | 0 |
'''simple docstring'''
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
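Two more small checks (fully parenthesized input with single-digit operands, as the parser above expects); note that for `-` and `/` the operand popped second ends up on the left:

print(dijkstras_two_stack_algorithm("((9 - 2) + 3)"))  # 10
print(dijkstras_two_stack_algorithm("((6 / 2) * 3)"))  # 9.0 (truediv returns a float)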
| 708 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)

    benchmark.run()


if __name__ == "__main__":
    main()
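An illustrative command line (the script filename is assumed; `--models`, `--batch_sizes` and `--sequence_lengths` are standard `TensorFlowBenchmarkArguments` fields):

# python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128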
| 88 | 0 |
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


UpperCamelCase_ = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs):
        '''simple docstring'''
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 709 |
'''simple docstring'''
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
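An illustrative run, using the cleaned-up names above (0 marks a free cell, 1 a blocked one; the path runs from the top-left to the bottom-right corner):

maze = [
    [0, 1, 0, 1, 1],
    [0, 0, 0, 0, 0],
    [1, 0, 1, 0, 1],
    [0, 0, 1, 0, 0],
    [1, 0, 0, 1, 0],
]
solve_maze(maze)  # prints the 0/1 path matrix row by row and returns True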
| 88 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""dino_vitb16""",
type=str,
help="""Name of the model trained with DINO you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--base_model""",
action="""store_true""",
help="""Whether to only convert the base model (no projection head weights).""",
)
parser.set_defaults(base_model=True)
UpperCamelCase_ = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
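An illustrative invocation (the script filename is assumed; the flags match the argparse setup above):

# python convert_dino_to_pytorch.py --model_name dino_vitb16 --pytorch_dump_folder_path ./dino-vitb16 --base_model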
| 710 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        '''simple docstring'''
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        '''simple docstring'''
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
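A minimal usage sketch; the checkpoint name is an example of a compatible unconditional model on the Hub:

pipeline = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
result = pipeline(batch_size=1, num_inference_steps=50, eta=0.0)
result.images[0].save("ddim_sample.png")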
| 88 | 0 |
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
UpperCamelCase_ = logging.getLogger(__name__)
def save_model(model, dirpath):
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_ad_tensor(tensor):
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None
lowercase : Union[str, Any] =0.0
lowercase : str =0.0
for step, inputs in enumerate(tqdm(_lowerCAmelCase , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ):
lowercase : Union[str, Any] =tuple(t.to(args.device ) for t in inputs )
(lowercase ) : Union[str, Any] =inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
lowercase : Union[str, Any] =model(_lowerCAmelCase , labels=_lowerCAmelCase , head_mask=_lowerCAmelCase )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
lowercase : List[Any] =(
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(_lowerCAmelCase ):
lowercase : Dict =entropy(attn.detach() , _lowerCAmelCase )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(_lowerCAmelCase ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
lowercase : Any =2
lowercase : int =torch.pow(torch.pow(_lowerCAmelCase , _lowerCAmelCase ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
if not args.dont_normalize_global_importance:
lowercase : List[str] =(head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('''Attention entropies''' )
print_ad_tensor(_lowerCAmelCase )
if compute_importance:
logger.info('''Head importance scores''' )
print_ad_tensor(_lowerCAmelCase )
logger.info('''Head ranked by importance scores''' )
lowercase : str =torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
lowercase : int =torch.arange(
head_importance.numel() , device=args.device )
lowercase : Optional[int] =head_ranks.view_as(_lowerCAmelCase )
print_ad_tensor(_lowerCAmelCase )
return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
logger.info('''Pruning: original score: %f, threshold: %f''' , _lowerCAmelCase , original_score * args.masking_threshold )
lowercase : Tuple =torch.ones_like(_lowerCAmelCase )
lowercase : int =max(1 , int(new_head_mask.numel() * args.masking_amount ) )
lowercase : Tuple =original_score
while current_score >= original_score * args.masking_threshold:
lowercase : int =new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
lowercase : Union[str, Any] =float('''Inf''' )
lowercase : List[str] =head_importance.view(-1 ).sort()[1]
if len(_lowerCAmelCase ) <= num_to_mask:
print('''BREAK BY num_to_mask''' )
break
# mask heads
lowercase : List[Any] =current_heads_to_mask[:num_to_mask]
logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) )
lowercase : Tuple =new_head_mask.view(-1 )
lowercase : Any =0.0
lowercase : Tuple =new_head_mask.view_as(_lowerCAmelCase )
lowercase : Optional[Any] =new_head_mask.clone().detach()
print_ad_tensor(_lowerCAmelCase )
# Compute metric and head importance again
lowercase : Optional[Any] =compute_heads_importance(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , compute_entropy=_lowerCAmelCase , head_mask=_lowerCAmelCase )
lowercase : Any =1 / loss
logger.info(
'''Masking: current score: %f, remaining heads %d (%.1f percents)''' , _lowerCAmelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info('''Final head mask''' )
print_ad_tensor(_lowerCAmelCase )
np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() )
return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
lowercase : str =datetime.now()
lowercase : Optional[int] =compute_heads_importance(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , compute_entropy=_lowerCAmelCase , compute_importance=_lowerCAmelCase , head_mask=_lowerCAmelCase )
lowercase : Any =1 / loss
lowercase : str =datetime.now() - before_time
lowercase : Optional[Any] =sum(p.numel() for p in model.parameters() )
lowercase : Union[str, Any] ={
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(_lowerCAmelCase ) )
}
for k, v in heads_to_prune.items():
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
lowercase : Union[str, Any] =[
v,
]
assert sum(len(_lowerCAmelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(_lowerCAmelCase )
lowercase : Dict =sum(p.numel() for p in model.parameters() )
lowercase : Tuple =datetime.now()
lowercase : Optional[Any] =compute_heads_importance(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , compute_entropy=_lowerCAmelCase , compute_importance=_lowerCAmelCase , head_mask=_lowerCAmelCase , actually_pruned=_lowerCAmelCase , )
lowercase : Optional[Any] =1 / loss
lowercase : List[str] =datetime.now() - before_time
logger.info(
'''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , _lowerCAmelCase , _lowerCAmelCase , pruned_num_params / original_num_params * 100 , )
logger.info('''Pruning: score with masking: %f score with pruning: %f''' , _lowerCAmelCase , _lowerCAmelCase )
logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100 )
save_model(_lowerCAmelCase , args.output_dir )
def main():
lowercase : Optional[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--data_dir''' , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--output_dir''' , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help='''The output directory where the model predictions and checkpoints will be written.''' , )
# Other parameters
parser.add_argument(
'''--config_name''' , default='''''' , type=_lowerCAmelCase , help='''Pretrained config name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--tokenizer_name''' , default='''''' , type=_lowerCAmelCase , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--cache_dir''' , default=_lowerCAmelCase , type=_lowerCAmelCase , help='''Where do you want to store the pre-trained models downloaded from s3''' , )
parser.add_argument(
'''--data_subset''' , type=_lowerCAmelCase , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' )
parser.add_argument(
'''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
parser.add_argument(
'''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' )
parser.add_argument(
'''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , )
parser.add_argument(
'''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' )
parser.add_argument(
'''--masking_threshold''' , default=0.9 , type=_lowerCAmelCase , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , )
parser.add_argument(
'''--masking_amount''' , default=0.1 , type=_lowerCAmelCase , help='''Amount to heads to masking at each masking step.''' )
parser.add_argument('''--metric_name''' , default='''acc''' , type=_lowerCAmelCase , help='''Metric to use for head masking.''' )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=_lowerCAmelCase , help=(
'''The maximum total input sequence length after WordPiece tokenization. \n'''
'''Sequences longer than this will be truncated, sequences shorter padded.'''
) , )
parser.add_argument('''--batch_size''' , default=1 , type=_lowerCAmelCase , help='''Batch size.''' )
parser.add_argument('''--seed''' , type=_lowerCAmelCase , default=42 )
parser.add_argument('''--local_rank''' , type=_lowerCAmelCase , default=-1 , help='''local_rank for distributed training on gpus''' )
parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' )
parser.add_argument('''--server_ip''' , type=_lowerCAmelCase , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=_lowerCAmelCase , default='''''' , help='''Can be used for distant debugging.''' )
lowercase : Any =parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_lowerCAmelCase )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
lowercase : Dict =torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' )
lowercase : Tuple =0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
lowercase : str =torch.device('''cuda''' , args.local_rank )
lowercase : int =1
torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
lowercase : List[str] =GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
lowercase : List[str] =nn.parallel.DistributedDataParallel(
_lowerCAmelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=_lowerCAmelCase )
elif args.n_gpu > 1:
lowercase : str =nn.DataParallel(_lowerCAmelCase )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=_lowerCAmelCase )
torch.save(_lowerCAmelCase , os.path.join(args.output_dir , '''run_args.bin''' ) )
logger.info('''Training/evaluation parameters %s''' , _lowerCAmelCase )
# Prepare dataset
lowercase : List[str] =np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
lowercase : str =(torch.from_numpy(_lowerCAmelCase ),)
lowercase : Union[str, Any] =TensorDataset(*_lowerCAmelCase )
lowercase : Any =RandomSampler(_lowerCAmelCase )
lowercase : int =DataLoader(_lowerCAmelCase , sampler=_lowerCAmelCase , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
lowercase : List[Any] =mask_heads(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
prune_heads(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
main()
| 711 |
'''simple docstring'''
import argparse
import copy
def generate_neighbours(path):
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )
    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser(description="""Tabu Search""")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
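For reference, an assumed input file format, matching how `generate_neighbours` parses `line.split()`: one weighted edge per line as `node node distance`. An illustrative file and invocation:

# cities.txt:
#   a b 20
#   a c 18
#   a d 22
#   b c 10
#   b d 11
#   c d 23
#
# python tabu_search.py -f cities.txt -i 100 -s 5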
| 88 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase_ = {
"configuration_roberta_prelayernorm": [
"ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
"RobertaPreLayerNormConfig",
"RobertaPreLayerNormOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaPreLayerNormForCausalLM",
"RobertaPreLayerNormForMaskedLM",
"RobertaPreLayerNormForMultipleChoice",
"RobertaPreLayerNormForQuestionAnswering",
"RobertaPreLayerNormForSequenceClassification",
"RobertaPreLayerNormForTokenClassification",
"RobertaPreLayerNormModel",
"RobertaPreLayerNormPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaPreLayerNormForCausalLM",
"TFRobertaPreLayerNormForMaskedLM",
"TFRobertaPreLayerNormForMultipleChoice",
"TFRobertaPreLayerNormForQuestionAnswering",
"TFRobertaPreLayerNormForSequenceClassification",
"TFRobertaPreLayerNormForTokenClassification",
"TFRobertaPreLayerNormMainLayer",
"TFRobertaPreLayerNormModel",
"TFRobertaPreLayerNormPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"FlaxRobertaPreLayerNormForCausalLM",
"FlaxRobertaPreLayerNormForMaskedLM",
"FlaxRobertaPreLayerNormForMultipleChoice",
"FlaxRobertaPreLayerNormForQuestionAnswering",
"FlaxRobertaPreLayerNormForSequenceClassification",
"FlaxRobertaPreLayerNormForTokenClassification",
"FlaxRobertaPreLayerNormModel",
"FlaxRobertaPreLayerNormPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 712 |
'''simple docstring'''
def solution(limit: int = 1000000) -> int:
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
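A small hand-checkable case: phi(2) + ... + phi(8) = 1 + 2 + 2 + 4 + 2 + 6 + 4 = 21, the number of reduced proper fractions with denominator at most 8 (Project Euler 72):

print(solution(8))  # 21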
| 88 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
UpperCamelCase_ = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
UpperCamelCase_ = {
"""junnyu/roformer_chinese_small""": 1536,
"""junnyu/roformer_chinese_base""": 1536,
"""junnyu/roformer_chinese_char_small""": 512,
"""junnyu/roformer_chinese_char_base""": 512,
"""junnyu/roformer_small_discriminator""": 128,
"""junnyu/roformer_small_generator""": 128,
}
UpperCamelCase_ = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        '''simple docstring'''
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        '''simple docstring'''
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
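A brief usage sketch (the checkpoint name is taken from the pretrained map above; the exact segmentation depends on the jieba dictionary):

tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
print(tokenizer.tokenize("今天天气非常好。"))  # roughly ['今天', '天气', '非常', '好', '。']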
| 713 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Verify BPE tokenization against the toy vocab written in setUp."""
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        # BioGPT prepends </s> (id 2) to every sequence.
        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
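

# Added note: the suite can be run directly with unittest's CLI (an
# illustrative guard, not part of the original test file).
if __name__ == "__main__":
    unittest.main()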
| 88 | 0 |
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.ndarray:
    """Decode an audio byte payload to a mono float32 waveform through ffmpeg."""
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """Stream raw microphone bytes from an ffmpeg capture process."""
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"
    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Yield overlapping microphone chunks with stride metadata for streaming use."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Re-chunk a byte iterator into fixed-size windows with left/right strides."""
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Run ffmpeg and yield raw stdout buffers of at most `buflen` bytes."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
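

# Added usage sketch (not in the original module): decode a local file to a
# 16 kHz mono float32 array. Assumes the `ffmpeg` binary is on PATH and that
# "sample.wav" exists; both names are hypothetical.
if __name__ == "__main__":
    with open("sample.wav", "rb") as f:
        audio = ffmpeg_read(f.read(), sampling_rate=16_000)
    print(audio.shape, audio.dtype)  # e.g. (160000,) float32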
| 714 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
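

# Added sketch (not part of the original file): the tester can be driven by
# hand to inspect the dummy inputs it builds; `parent` is unused on this path.
if __name__ == "__main__":
    tester = FlaxRobertaModelTester(parent=None)
    config, inputs_dict = tester.prepare_config_and_inputs_for_common()
    print(config.hidden_size, inputs_dict["input_ids"].shape)  # 32 (13, 7)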
| 88 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextV2Config
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import ConvNextV2Backbone, ConvNextV2ForImageClassification, ConvNextV2Model
    from transformers.models.convnextv2.modeling_convnextv2 import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextV2Config(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextV2Model,
            ConvNextV2ForImageClassification,
            ConvNextV2Backbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextV2Model, "image-classification": ConvNextV2ForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextV2Config, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_training(self):
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True
            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
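

# Added sketch (not part of the original file): inspect the dummy batch the
# tester builds; requires only `torch`, no Hub access.
if __name__ == "__main__":
    tester = ConvNextV2ModelTester(parent=None)
    config, pixel_values, labels = tester.prepare_config_and_inputs()
    print(pixel_values.shape)  # torch.Size([13, 3, 32, 32])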
| 715 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class FFT:
    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]
        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)
        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)
        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))
        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)
        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))
        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        """Discrete Fourier transform of polyA or polyB, radix-2 butterflies."""
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol
            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        """Pointwise product in the frequency domain, then inverse DFT."""
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b
        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    def __str__(self):
        # Note: enumerate yields (index, value), so the index must come first.
        a = "A = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
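    # Added worked example: (1 + 2x + 3x^2) * (4 + 5x) = 4 + 13x + 22x^2 + 15x^3.
    example = FFT(poly_a=[1, 2, 3], poly_b=[4, 5])
    print(example.product)  # ~[(4+0j), (13+0j), (22+0j), (15+0j)] up to rounding
    print(example)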
| 88 | 0 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
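

# Added sketch (illustrative, not in the original): instantiating the shim
# warns once and otherwise behaves exactly like DPTImageProcessor.
if __name__ == "__main__":
    extractor = DPTFeatureExtractor()
    print(type(extractor).__mro__[1].__name__)  # DPTImageProcessor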
| 716 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
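
# Added usage note (illustrative, not part of the original file): with the lazy
# module installed in sys.modules, submodule attributes import on first access:
#
#   from transformers.models.plbart import PLBartConfig  # triggers the real import lazily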
| 88 | 0 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
    "c_attn": "att_proj",
    "c_proj": "out_proj",
    "c_fc": "in_proj",
    "transformer.": "",
    "h.": "layers.",
    "ln_1": "layernorm_1",
    "ln_2": "layernorm_2",
    "ln_f": "layernorm_final",
    "wpe": "position_embeds_layer",
    "wte": "input_embeds_layer",
}
REMOTE_MODEL_PATHS = {
    "text_small": {
        "repo_id": "suno/bark",
        "file_name": "text.pt",
    },
    "coarse_small": {
        "repo_id": "suno/bark",
        "file_name": "coarse.pt",
    },
    "fine_small": {
        "repo_id": "suno/bark",
        "file_name": "fine.pt",
    },
    "text": {
        "repo_id": "suno/bark",
        "file_name": "text_2.pt",
    },
    "coarse": {
        "repo_id": "suno/bark",
        "file_name": "coarse_2.pt",
    },
    "fine": {
        "repo_id": "suno/bark",
        "file_name": "fine_2.pt",
    },
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])


def _download(from_hf_path, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")
    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)
    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params / 1e6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    device = "cpu"  # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)
    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)
    if model_type == "text":
        bark_model = bark_model["model"]
    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")
    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10
    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)
    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")
    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")
    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )
    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("model_type", type=str, help="text, coarse or fine.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
    args = parser.parse_args()
    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
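
# Added example invocation (illustrative; the script file name and output path
# are hypothetical):
#
#   python convert_bark_checkpoint.py text /tmp/bark-text --is_small
#
# This downloads the small "text" checkpoint into the cache, converts it and
# writes a Hugging Face-format model to /tmp/bark-text.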
| 717 |
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")
        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
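

# Added sketch (not part of the original module): composing a ViT encoder with
# a GPT-2 decoder config; both sub-configs are library defaults here.
if __name__ == "__main__":
    from transformers import GPT2Config, ViTConfig

    config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), GPT2Config())
    print(config.decoder.is_decoder, config.decoder.add_cross_attention)  # True True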
| 88 | 0 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()
            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)
            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)
            config = AutoFeatureExtractor.from_pretrained(tmpdirname)
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)
            self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, the loading call below times out and raises.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")

    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)
            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)
            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)
            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
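

# Added note (illustrative guard, not part of the original test file): the
# shortcut below shows the Auto-class resolution in one line, assuming Hub access.
if __name__ == "__main__":
    fe = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
    print(type(fe).__name__)  # Wav2Vec2FeatureExtractor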
| 718 |
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE


def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config", type=str, default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer", type=str, default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu", action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name", type=str, default="local",
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
    )
    parser.add_argument(
        "--tpu_zone", type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16", action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset", type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset", type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length", type=int, default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args
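

# Added example invocation (illustrative; the bucket paths are hypothetical):
#
#   python run_mlm.py --tpu_name local --bfloat16 \
#       --train_dataset gs://my-bucket/train --eval_dataset gs://my-bucket/eval \
#       --output_dir gs://my-bucket/model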
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count
    return num_samples
def prepare_dataset( records , decode_fn , mask_fn , batch_size , shuffle , shuffle_buffer_size=None ):
    num_samples = count_samples(records )
    dataset = tf.data.Dataset.from_tensor_slices(records )
    if shuffle:
        dataset = dataset.shuffle(len(dataset ) )
    dataset = tf.data.TFRecordDataset(dataset , num_parallel_reads=tf.data.AUTOTUNE )
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples ) )
    dataset = dataset.map(decode_fn , num_parallel_calls=tf.data.AUTOTUNE )
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size )
    dataset = dataset.batch(batch_size , drop_remainder=True )
    dataset = dataset.map(mask_fn , num_parallel_calls=tf.data.AUTOTUNE )
    dataset = dataset.prefetch(tf.data.AUTOTUNE )
    return dataset
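# Note on the ordering inside prepare_dataset: the shard filenames are shuffled
# before TFRecordDataset interleaves them, individual samples are shuffled again
# with a bounded buffer, and the masking map runs after batching, so a fresh
# random mask is drawn for every batch in every epoch instead of being baked
# into the stored records.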
def main( args ):
    if not args.no_tpu:
        tpu = initialize_tpu(args )
        strategy = tf.distribute.TPUStrategy(tpu )
    else:
        strategy = tf.distribute.OneDeviceStrategy(device='''/gpu:0''' )
    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy('''mixed_bfloat16''' )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer )
    config = AutoConfig.from_pretrained(args.pretrained_model_config )
    config.vocab_size = tokenizer.vocab_size
    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset , '''*.tfrecord''' ) )
    if not training_records:
        raise ValueError(f'''No .tfrecord files found in {args.train_dataset}.''' )
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset , '''*.tfrecord''' ) )
    if not eval_records:
        raise ValueError(f'''No .tfrecord files found in {args.eval_dataset}.''' )
    num_train_samples = count_samples(training_records )
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs
    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config )
        model(model.dummy_inputs )  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer , schedule = create_optimizer(
            num_train_steps=total_train_steps , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer , metrics=['''accuracy'''] )
    def decode_fn(example ):
        features = {
            '''input_ids''': tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
            '''attention_mask''': tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
        }
        return tf.io.parse_single_example(example , features )
    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer , mlm_probability=args.mlm_probability , mlm=True , return_tensors='''tf''' )
    def mask_with_collator(batch ):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch['''attention_mask'''] , tf.bool )
            | (batch['''input_ids'''] == tokenizer.cls_token_id)
            | (batch['''input_ids'''] == tokenizer.sep_token_id)
        )
        batch['''input_ids'''] , batch['''labels'''] = data_collator.tf_mask_tokens(
            batch['''input_ids'''] , vocab_size=len(tokenizer ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=special_tokens_mask , )
        return batch
    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=True , shuffle_buffer_size=args.shuffle_buffer_size , )
    eval_dataset = prepare_dataset(
        eval_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=False , )
    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=tokenizer ) )
    model.fit(
        train_dataset , validation_data=eval_dataset , epochs=args.num_epochs , callbacks=callbacks , )
    model.save_pretrained(args.output_dir )
if __name__ == "__main__":
    args = parse_args()
main(args)
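# A hedged example invocation; the script name, bucket paths and model
# identifiers below are placeholders rather than values defined in this file:
#
#   python run_mlm.py \
#       --train_dataset gs://my-bucket/train/ \
#       --eval_dataset gs://my-bucket/eval/ \
#       --tokenizer my-unigram-tokenizer \
#       --pretrained_model_config roberta-base \
#       --output_dir ./checkpoints \
#       --bfloat16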
| 88 | 0 |
'''simple docstring'''
import numpy as np
class __SCREAMING_SNAKE_CASE :
    def __init__( self : List[str] , red=None , green=None , blue=None , red_edge=None , nir=None ):
        '''simple docstring'''
        self.set_matricies(red=red , green=green , blue=blue , red_edge=red_edge , nir=nir )
    def set_matricies( self : str , red=None , green=None , blue=None , red_edge=None , nir=None ):
        '''simple docstring'''
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
return True
    def calculation( self : Tuple , index="" , red=None , green=None , blue=None , red_edge=None , nir=None ):
        '''simple docstring'''
        self.set_matricies(red=red , green=green , blue=blue , red_edge=red_edge , nir=nir )
        funcs = {
            '''ARVI2''': self.arv12,
'''CCCI''': self.ccci,
'''CVI''': self.cvi,
'''GLI''': self.gli,
'''NDVI''': self.ndvi,
'''BNDVI''': self.bndvi,
'''redEdgeNDVI''': self.red_edge_ndvi,
'''GNDVI''': self.gndvi,
'''GBNDVI''': self.gbndvi,
'''GRNDVI''': self.grndvi,
'''RBNDVI''': self.rbndvi,
'''PNDVI''': self.pndvi,
'''ATSAVI''': self.atsavi,
'''BWDRVI''': self.bwdrvi,
'''CIgreen''': self.ci_green,
'''CIrededge''': self.ci_rededge,
'''CI''': self.ci,
'''CTVI''': self.ctvi,
'''GDVI''': self.gdvi,
'''EVI''': self.evi,
'''GEMI''': self.gemi,
'''GOSAVI''': self.gosavi,
'''GSAVI''': self.gsavi,
'''Hue''': self.hue,
'''IVI''': self.ivi,
'''IPVI''': self.ipvi,
'''I''': self.i,
'''RVI''': self.rvi,
'''MRVI''': self.mrvi,
'''MSAVI''': self.m_savi,
'''NormG''': self.norm_g,
'''NormNIR''': self.norm_nir,
'''NormR''': self.norm_r,
'''NGRDI''': self.ngrdi,
'''RI''': self.ri,
'''S''': self.s,
'''IF''': self._if,
'''DVI''': self.dvi,
'''TVI''': self.tvi,
'''NDRE''': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('''Index not in the list!''' )
return False
    def arv12( self : Any ):
'''simple docstring'''
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
    def ccci( self : Optional[Any] ):
'''simple docstring'''
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
    def cvi( self : Optional[int] ):
'''simple docstring'''
return self.nir * (self.red / (self.green**2))
    def gli( self : int ):
'''simple docstring'''
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
    def ndvi( self : Dict ):
'''simple docstring'''
return (self.nir - self.red) / (self.nir + self.red)
    def bndvi( self : Optional[Any] ):
'''simple docstring'''
return (self.nir - self.blue) / (self.nir + self.blue)
    def red_edge_ndvi( self : Dict ):
'''simple docstring'''
return (self.redEdge - self.red) / (self.redEdge + self.red)
    def gndvi( self : Optional[Any] ):
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green)
    def gbndvi( self : Tuple ):
'''simple docstring'''
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
    def grndvi( self : Optional[Any] ):
'''simple docstring'''
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
    def rbndvi( self : Union[str, Any] ):
'''simple docstring'''
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
    def pndvi( self : Optional[int] ):
'''simple docstring'''
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
    def atsavi( self : Optional[Any] , x=0.08 , a=1.22 , b=0.03 ):
'''simple docstring'''
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
    def bwdrvi( self : List[Any] ):
'''simple docstring'''
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
    def ci_green( self : List[Any] ):
'''simple docstring'''
return (self.nir / self.green) - 1
    def ci_rededge( self : Any ):
'''simple docstring'''
return (self.nir / self.redEdge) - 1
    def ci( self : Union[str, Any] ):
'''simple docstring'''
return (self.red - self.blue) / self.red
    def ctvi( self : Tuple ):
        '''simple docstring'''
        ndvi = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
    def gdvi( self : Dict ):
'''simple docstring'''
return self.nir - self.green
    def evi( self : Optional[int] ):
'''simple docstring'''
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
    def gemi( self : int ):
        '''simple docstring'''
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
    def gosavi( self : Dict , y=0.16 ):
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green + y)
    def gsavi( self : Optional[Any] , n=0.5 ):
'''simple docstring'''
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
    def hue( self : List[Any] ):
'''simple docstring'''
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
    def ivi( self : Dict , a=None , b=None ):
'''simple docstring'''
return (self.nir - b) / (a * self.red)
    def ipvi( self : List[Any] ):
'''simple docstring'''
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
    def i( self : Optional[Any] ):
'''simple docstring'''
return (self.red + self.green + self.blue) / 30.5
    def rvi( self : int ):
'''simple docstring'''
return self.nir / self.red
    def mrvi( self : Union[str, Any] ):
'''simple docstring'''
return (self.rvi() - 1) / (self.rvi() + 1)
    def m_savi( self : Dict ):
'''simple docstring'''
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
    def norm_g( self : str ):
'''simple docstring'''
return self.green / (self.nir + self.red + self.green)
    def norm_nir( self : List[str] ):
'''simple docstring'''
return self.nir / (self.nir + self.red + self.green)
    def norm_r( self : Optional[int] ):
'''simple docstring'''
return self.red / (self.nir + self.red + self.green)
    def ngrdi( self : List[Any] ):
'''simple docstring'''
return (self.green - self.red) / (self.green + self.red)
    def ri( self : str ):
'''simple docstring'''
return (self.red - self.green) / (self.red + self.green)
    def s( self : int ):
        '''simple docstring'''
        max_value = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
        min_value = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
    def _if( self : List[str] ):
'''simple docstring'''
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
    def dvi( self : Dict ):
'''simple docstring'''
return self.nir / self.red
    def tvi( self : Dict ):
'''simple docstring'''
return (self.ndvi() + 0.5) ** (1 / 2)
    def ndre( self : Tuple ):
'''simple docstring'''
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
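# A minimal usage sketch. The 2x2 band arrays are invented reflectance values,
# included only to show the call shape of calculation().
if __name__ == "__main__":
    bands = np.array([[0.5, 0.6], [0.7, 0.8]] )
    cs = __SCREAMING_SNAKE_CASE(red=bands , green=bands , blue=bands , red_edge=bands , nir=2 * bands )
    print(cs.calculation('''NDVI''' ))  # elementwise (nir - red) / (nir + red) = 1/3 everywhere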
| 719 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
UpperCamelCase_ = """3"""
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 88 | 0 |
'''simple docstring'''
def is_palindrome( head ):
    if not head:
        return True
    # split the list to two parts
    fast , slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack( head ):
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast , slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val )
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict( head ):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos )
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v ) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0 , len(v ) ):
                if v[i] + v[len(v ) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
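# The three checks above assume a singly linked list node exposing `val` and
# `next`; no node class is defined in this file, so the sketch below supplies a
# minimal one purely for illustration.
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None
def _build(values):
    # Build a singly linked list from a Python list and return its head.
    head = tail = None
    for value in values:
        node = ListNode(value)
        if head is None:
            head = tail = node
        else:
            tail.next = node
            tail = node
    return head
if __name__ == "__main__":
    for check in (is_palindrome, is_palindrome_stack, is_palindrome_dict):
        assert check(_build([1, 2, 2, 1] ) ) is True
        assert check(_build([1, 2, 3] ) ) is False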
| 720 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """facebook/xglm-564M""": 2048,
}
class __SCREAMING_SNAKE_CASE ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self : Dict , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs , ):
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [F'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
        kwargs["additional_special_tokens"] = kwargs.get('''additional_special_tokens''' , [] )
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
        sp_size = len(self.sp_model )
        madeup_words = {F'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
        self.fairseq_tokens_to_ids.update(madeup_words )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__( self : int ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self : Optional[Any] , d : Any ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens( self : str , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1
    def get_special_tokens_mask( self : Any , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 ))
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 ))
    def create_token_type_ids_from_sequences( self : Dict , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0 ) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1 ) * [0]
    @property
    def vocab_size( self : Dict ):
        '''simple docstring'''
        return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
    def get_vocab( self : List[str] ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self : Tuple , text : str ):
        '''simple docstring'''
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self : Dict , token : Optional[Any] ):
        '''simple docstring'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self : Optional[Any] , index : Any ):
        '''simple docstring'''
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self : List[str] , tokens : Optional[Any] ):
        '''simple docstring'''
        out_string = ''''''.join(tokens ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
        return out_string
    def save_vocabulary( self : Optional[int] , save_directory : str , filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
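# A hedged usage sketch: '''sentencepiece.bpe.model''' stands in for a real
# SentencePiece model file on disk, which this module does not ship. The detail
# worth noticing is the fairseq_offset of 1: every SentencePiece id is shifted
# up by one so that ids 0-3 stay reserved for <s>, <pad>, </s> and <unk>.
#
#   tokenizer = __SCREAMING_SNAKE_CASE('''sentencepiece.bpe.model''' )
#   tokens = tokenizer._tokenize('''Hello world''' )
#   ids = [tokenizer._convert_token_to_id(token ) for token in tokens]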
| 88 | 0 |
'''simple docstring'''
def max_product_subarray( numbers : list[int] ) -> int:
    '''simple docstring'''
    if not numbers:
        return 0
    if not isinstance(numbers , (list, tuple) ) or not all(
        isinstance(number , int ) for number in numbers ):
        raise ValueError('''numbers must be an iterable of integers''' )
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1 , len(numbers ) ):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now , min_till_now = min_till_now, max_till_now
        max_till_now = max(number , max_till_now * number )
        min_till_now = min(number , min_till_now * number )
        # update the maximum product found till now
        max_prod = max(max_prod , max_till_now )
return max_prod
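# Worked example: in [2, 3, -2, 4] the best contiguous product is 2 * 3 = 6; the
# -2 flips the sign, which is exactly why the minimum product is tracked next to
# the maximum.
if __name__ == "__main__":
    assert max_product_subarray([2, 3, -2, 4] ) == 6
    assert max_product_subarray([-2, 0, -1] ) == 0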
| 721 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt( args ):
    parameter_file = os.path.join(args.tf_model_dir , '''parameters.json''' )
    params = json.loads(open(parameter_file ).read() )
    if not params:
        raise ValueError(
            f'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
    if not args.output.endswith('''.pt''' ):
        args.output = args.output + '''.pt'''
    new_state = OrderedDict()
    with tf.device('''/CPU:0''' ):
        reader = tf.train.load_checkpoint(args.tf_model_dir )
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name ).astype(np.float32 )
            if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
                continue
            if key_name.startswith('''pasts/''' ):
                if key_name.startswith('''pasts/mlp''' ):
                    player = int(key_name[9] )
                elif key_name.startswith('''pasts/out''' ):
                    player = 8
                name = '''model.sqout.%d.weight''' % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state )
            elif key_name.startswith('''model/moe''' ):
                player = int(key_name[9:].split('''/''' )[0] )
                if key_name.endswith('''/switch_gating/kernel''' ):
                    name = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
                    state = vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith('''/softmlp/kernel''' ):
                    name = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
                    state = vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
                    nlayer = key_name[-9:-7]
                    for i in range(16 ):
                        name = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0] ).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state )
            elif key_name.startswith('''model/mlp''' ):
                player = int(key_name[9:].split('''/''' )[0] )
                if key_name.endswith('''/p1/kernel''' ):
                    name = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
                    state = vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith('''/p1/bias''' ):
                    name = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith('''/p2/kernel''' ):
                    name = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
                    state = vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith('''/p2/bias''' ):
                    name = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state )
            elif key_name.startswith('''model/ln''' ):
                player = int(key_name[8:].split('''/''' )[0] )
                if key_name.endswith('''/b''' ):
                    name = '''model.blocks.%d.feed_forward.norm.bias''' % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith('''/g''' ):
                    name = '''model.blocks.%d.feed_forward.norm.weight''' % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state )
            elif key_name.startswith('''model/att''' ):
                player = int(key_name[9:].split('''/''' )[0] )
                if key_name.endswith('''/qkv/kernel''' ):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
                        .transpose([1, 0] )
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
                        .transpose([1, 0] )
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
                        .transpose([1, 0] )
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
                    new_state[name] = torch.tensor(state_q )
                    name = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
                    new_state[name] = torch.tensor(state_k )
                    name = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
                    new_state[name] = torch.tensor(state_v )
                elif key_name.endswith('''/o/kernel''' ):
                    name = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state )
            elif key_name.startswith('''model/an''' ):
                player = int(key_name[8:].split('''/''' )[0] )
                if key_name.endswith('''/b''' ):
                    name = '''model.blocks.%d.self_attn.norm.bias''' % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith('''/g''' ):
                    name = '''model.blocks.%d.self_attn.norm.weight''' % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state )
            elif (
                key_name.startswith('''model/wte''' )
                or key_name.startswith('''model/wpe''' )
                or key_name.startswith('''model/ete''' )
            ):
                nlayer = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
                    key_name[-3:]
                ]
                name = '''model.%s.weight''' % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state )
                if key_name.startswith('''model/wte''' ):
                    name = '''lm_head.weight'''
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state )
            elif key_name.startswith('''model/wob''' ):
                name = '''final_logits_bias'''
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1) )
                new_state[name] = torch.tensor(state )
            elif key_name == "model/dense/kernel":
                name = '''model.last_project.weight'''
                state = vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state )
            elif key_name == "model/dense_1/bias":
                name = '''model.last_project.bias'''
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state )
    torch.save(new_state , args.output )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser(
description="""model converter.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("""--tf_model_dir""", metavar="""PATH""", type=str, required=True, help="""import model""")
parser.add_argument("""--output""", metavar="""PATH""", type=str, required=True, help="""output model""")
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
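# A hedged example invocation; the script filename and both paths are
# placeholders:
#
#   python convert_tf_gptsan_to_pt.py \
#       --tf_model_dir ./gptsan_tf_checkpoint \
#       --output ./gptsan_pytorch_model.pt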
| 88 | 0 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def _lowerCAmelCase ( __magic_name__ : str = "AAPL" ):
lowercase : Dict =f'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
lowercase : Union[str, Any] =BeautifulSoup(requests.get(snake_case__ ).text , '''html.parser''' )
lowercase : Union[str, Any] ='''My(6px) Pos(r) smartphone_Mt(6px)'''
return soup.find('''div''' , class_=class_ ).find('''span''' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
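# The CSS class string above is tied to one particular revision of the Yahoo
# Finance markup, so the scraper silently breaks when the page layout changes.
# A slightly more defensive variant, still only a sketch (the function name,
# User-Agent value and timeout are illustrative choices, not part of the
# original snippet):
#
#   def stock_price_safe(symbol: str = "AAPL") -> str:
#       response = requests.get(
#           f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}",
#           headers={"User-Agent": "Mozilla/5.0"},
#           timeout=10,
#       )
#       response.raise_for_status()
#       soup = BeautifulSoup(response.text, "html.parser")
#       tag = soup.find("div", class_="My(6px) Pos(r) smartphone_Mt(6px)")
#       if tag is None:
#           raise RuntimeError(f"price element not found for {symbol}")
#       return tag.find("span").text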
| 700 |
'''simple docstring'''
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ = """▁"""
UpperCamelCase_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = BigBirdTokenizer
lowerCamelCase_ = BigBirdTokenizerFast
lowerCamelCase_ = True
lowerCamelCase_ = True
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
super().setUp()
lowercase : Optional[int] =self.tokenizer_class(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Optional[int] ='''<s>'''
lowercase : int =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : Dict =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''[MASK]''' )
self.assertEqual(len(UpperCAmelCase__ ) , 1004 )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowercase : Optional[int] =self.get_tokenizer()
lowercase : Any =self.get_rust_tokenizer()
lowercase : int ='''I was born in 92000, and this is falsé.'''
lowercase : List[str] =tokenizer.tokenize(UpperCAmelCase__ )
lowercase : Dict =rust_tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : str =tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
lowercase : Union[str, Any] =rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Optional[Any] =self.get_rust_tokenizer()
lowercase : Optional[Any] =tokenizer.encode(UpperCAmelCase__ )
lowercase : Union[str, Any] =rust_tokenizer.encode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Tuple =BigBirdTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
lowercase : Tuple =tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [285, 46, 10, 170, 382] , )
lowercase : Tuple =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowercase : Any =tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
lowercase : List[Any] =tokenizer.convert_ids_to_tokens(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : str ='''Hello World!'''
lowercase : Union[str, Any] =[65, 18536, 2260, 101, 66]
self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )
@slow
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : int =(
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
lowercase : Tuple =[65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )
@require_torch
@slow
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
lowercase : List[str] =list(self.big_tokenizer.get_vocab().keys() )[:10]
lowercase : Dict =''' '''.join(UpperCAmelCase__ )
lowercase : Union[str, Any] =self.big_tokenizer.encode_plus(UpperCAmelCase__ , return_tensors='''pt''' , return_token_type_ids=UpperCAmelCase__ )
lowercase : Dict =self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=UpperCAmelCase__ )
lowercase : Optional[int] =BigBirdConfig(attention_type='''original_full''' )
lowercase : Dict =BigBirdModel(UpperCAmelCase__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**UpperCAmelCase__ )
model(**UpperCAmelCase__ )
@slow
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : Union[str, Any] =BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
lowercase : Dict =tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids )
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' )
@slow
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
# fmt: off
lowercase : str ={'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
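# Hedged note on running these cases: in the transformers test suite, methods
# marked @slow are skipped unless the RUN_SLOW environment variable is set, so a
# full local run looks roughly like the following (the file path is wherever
# this module is saved):
#
#   RUN_SLOW=1 python -m pytest path/to/this_test_file.py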
| 88 | 0 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = RobertaTokenizer
lowerCamelCase_ = RobertaTokenizerFast
lowerCamelCase_ = True
lowerCamelCase_ = {'cls_token': '<s>'}
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase : Any =[
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowercase : Dict =dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
lowercase : List[Any] =['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowercase : Union[str, Any] ={'''unk_token''': '''<unk>'''}
lowercase : Optional[Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase : Optional[int] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCAmelCase__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCAmelCase__ ) )
def lowerCamelCase_ ( self : Tuple , **UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[Any] , **UpperCAmelCase__ : List[str] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase : Any ='''lower newer'''
lowercase : Union[str, Any] ='''lower newer'''
return input_text, output_text
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : List[Any] =self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase : Optional[int] ='''lower newer'''
lowercase : Tuple =['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
lowercase : str =tokenizer.tokenize(UpperCAmelCase__ ) # , add_prefix_space=True)
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Optional[int] =tokens + [tokenizer.unk_token]
lowercase : Union[str, Any] =[0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : int =self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=UpperCAmelCase__ ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=UpperCAmelCase__ ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : List[Any] =self.tokenizer_class.from_pretrained('''roberta-base''' )
lowercase : List[Any] =tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCAmelCase__ )
lowercase : Any =tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCAmelCase__ )
lowercase : List[str] =tokenizer.encode(
'''sequence builders''' , add_special_tokens=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ )
lowercase : Any =tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ )
lowercase : Union[str, Any] =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ )
lowercase : List[Any] =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : Any =self.get_tokenizer()
lowercase : List[Any] ='''Encode this sequence.'''
lowercase : Tuple =tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
lowercase : str =tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ )
lowercase : Union[str, Any] =tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : str =tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ )
lowercase : Any =tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
lowercase : Union[str, Any] =tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
lowercase : int =tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(UpperCAmelCase__ , UpperCAmelCase__ )
# Testing spaces after special tokens
lowercase : int ='''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ )} ) # mask token has a left space
lowercase : int =tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
lowercase : int ='''Encode <mask> sequence'''
lowercase : Any ='''Encode <mask>sequence'''
lowercase : Optional[Any] =tokenizer.encode(UpperCAmelCase__ )
lowercase : List[str] =encoded.index(UpperCAmelCase__ )
lowercase : Optional[int] =tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Union[str, Any] =tokenizer.encode(UpperCAmelCase__ )
lowercase : Optional[Any] =encoded.index(UpperCAmelCase__ )
lowercase : List[str] =tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase : Any =self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase : Optional[Any] =self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase : int ='''A, <mask> AllenNLP sentence.'''
lowercase : List[str] =tokenizer_r.encode_plus(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ )
lowercase : Dict =tokenizer_p.encode_plus(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
lowercase : Optional[Any] =tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
lowercase : int =tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
UpperCAmelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
UpperCAmelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
lowercase : str =self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ )
lowercase : str =json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
lowercase : Optional[Any] =json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , UpperCAmelCase__ )
self.assertEqual(post_processor_state['''add_prefix_space'''] , UpperCAmelCase__ )
self.assertEqual(post_processor_state['''trim_offsets'''] , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase : List[str] ='''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
lowercase : Union[str, Any] =F'''{text_of_1_token} {text_of_1_token}'''
lowercase : Any =self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase__ , use_fast=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ )
lowercase : Any =tokenizer_r(UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCAmelCase__ ) + 1, len(UpperCAmelCase__ ) + 1 + len(UpperCAmelCase__ )) , )
lowercase : Optional[int] =self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase__ , use_fast=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ )
lowercase : List[str] =tokenizer_r(UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCAmelCase__ ) + 1, len(UpperCAmelCase__ ) + 1 + len(UpperCAmelCase__ )) , )
lowercase : Optional[int] =self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase__ , use_fast=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ )
lowercase : Optional[int] =tokenizer_r(UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCAmelCase__ ), len(UpperCAmelCase__ ) + 1 + len(UpperCAmelCase__ )) , )
lowercase : List[str] =self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase__ , use_fast=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ )
lowercase : Optional[Any] =tokenizer_r(UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCAmelCase__ ), len(UpperCAmelCase__ ) + 1 + len(UpperCAmelCase__ )) , )
lowercase : Tuple =F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
lowercase : int =self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase__ , use_fast=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ )
lowercase : Dict =tokenizer_r(UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCAmelCase__ ) + 1, 1 + len(UpperCAmelCase__ ) + 1 + len(UpperCAmelCase__ )) , )
lowercase : Optional[Any] =self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase__ , use_fast=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ )
lowercase : Optional[Any] =tokenizer_r(UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCAmelCase__ ), 1 + len(UpperCAmelCase__ ) + 1 + len(UpperCAmelCase__ )) , )
lowercase : Optional[int] =self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase__ , use_fast=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ )
lowercase : int =tokenizer_r(UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCAmelCase__ ), 1 + len(UpperCAmelCase__ ) + 1 + len(UpperCAmelCase__ )) , )
| 701 |
'''simple docstring'''
def binomial_coefficient( n : int , r : int ) -> int:
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
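# Sanity check: the row-by-row computation above must agree with the closed form
# C(10, 5) = 10! / (5! * 5!) = 252, which math.comb returns directly.
import math
assert binomial_coefficient(n=10, r=5) == math.comb(10, 5) == 252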
| 88 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : int ={
'''task_specific_params''': {
'''summarization''': {'''length_penalty''': 1.0, '''max_length''': 128, '''min_length''': 12, '''num_beams''': 4},
'''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 142, '''min_length''': 56, '''num_beams''': 4},
'''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 62, '''min_length''': 11, '''num_beams''': 6},
}
}
lowercase : int ={
'''task_specific_params.summarization.length_penalty''': 1.0,
'''task_specific_params.summarization.max_length''': 128,
'''task_specific_params.summarization.min_length''': 12,
'''task_specific_params.summarization.num_beams''': 4,
'''task_specific_params.summarization_cnn.length_penalty''': 2.0,
'''task_specific_params.summarization_cnn.max_length''': 142,
'''task_specific_params.summarization_cnn.min_length''': 56,
'''task_specific_params.summarization_cnn.num_beams''': 4,
'''task_specific_params.summarization_xsum.length_penalty''': 1.0,
'''task_specific_params.summarization_xsum.max_length''': 62,
'''task_specific_params.summarization_xsum.min_length''': 11,
'''task_specific_params.summarization_xsum.num_beams''': 6,
}
self.assertEqual(flatten_dict(UpperCAmelCase__ ) , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : Optional[int] =np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(UpperCAmelCase__ ) , x.transpose() ) )
lowercase : Dict =np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(UpperCAmelCase__ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : List[str] =np.random.randn(3 , 4 )
lowercase : Any =torch.tensor(UpperCAmelCase__ )
self.assertTrue(np.allclose(transpose(UpperCAmelCase__ ) , transpose(UpperCAmelCase__ ).numpy() ) )
lowercase : Optional[int] =np.random.randn(3 , 4 , 5 )
lowercase : Dict =torch.tensor(UpperCAmelCase__ )
self.assertTrue(np.allclose(transpose(UpperCAmelCase__ , axes=(1, 2, 0) ) , transpose(UpperCAmelCase__ , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : List[Any] =np.random.randn(3 , 4 )
lowercase : Dict =tf.constant(UpperCAmelCase__ )
self.assertTrue(np.allclose(transpose(UpperCAmelCase__ ) , transpose(UpperCAmelCase__ ).numpy() ) )
lowercase : Dict =np.random.randn(3 , 4 , 5 )
lowercase : int =tf.constant(UpperCAmelCase__ )
self.assertTrue(np.allclose(transpose(UpperCAmelCase__ , axes=(1, 2, 0) ) , transpose(UpperCAmelCase__ , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : List[str] =np.random.randn(3 , 4 )
lowercase : Any =jnp.array(UpperCAmelCase__ )
self.assertTrue(np.allclose(transpose(UpperCAmelCase__ ) , np.asarray(transpose(UpperCAmelCase__ ) ) ) )
lowercase : Optional[int] =np.random.randn(3 , 4 , 5 )
lowercase : Optional[Any] =jnp.array(UpperCAmelCase__ )
self.assertTrue(np.allclose(transpose(UpperCAmelCase__ , axes=(1, 2, 0) ) , np.asarray(transpose(UpperCAmelCase__ , axes=(1, 2, 0) ) ) ) )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : List[Any] =np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(UpperCAmelCase__ , (4, 3) ) , np.reshape(UpperCAmelCase__ , (4, 3) ) ) )
lowercase : Optional[int] =np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(UpperCAmelCase__ , (12, 5) ) , np.reshape(UpperCAmelCase__ , (12, 5) ) ) )
@require_torch
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : str =np.random.randn(3 , 4 )
lowercase : Optional[int] =torch.tensor(UpperCAmelCase__ )
self.assertTrue(np.allclose(reshape(UpperCAmelCase__ , (4, 3) ) , reshape(UpperCAmelCase__ , (4, 3) ).numpy() ) )
lowercase : int =np.random.randn(3 , 4 , 5 )
lowercase : Tuple =torch.tensor(UpperCAmelCase__ )
self.assertTrue(np.allclose(reshape(UpperCAmelCase__ , (12, 5) ) , reshape(UpperCAmelCase__ , (12, 5) ).numpy() ) )
@require_tf
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : Tuple =np.random.randn(3 , 4 )
lowercase : Any =tf.constant(UpperCAmelCase__ )
self.assertTrue(np.allclose(reshape(UpperCAmelCase__ , (4, 3) ) , reshape(UpperCAmelCase__ , (4, 3) ).numpy() ) )
lowercase : Any =np.random.randn(3 , 4 , 5 )
lowercase : Optional[Any] =tf.constant(UpperCAmelCase__ )
self.assertTrue(np.allclose(reshape(UpperCAmelCase__ , (12, 5) ) , reshape(UpperCAmelCase__ , (12, 5) ).numpy() ) )
@require_flax
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Tuple =np.random.randn(3 , 4 )
lowercase : List[str] =jnp.array(UpperCAmelCase__ )
self.assertTrue(np.allclose(reshape(UpperCAmelCase__ , (4, 3) ) , np.asarray(reshape(UpperCAmelCase__ , (4, 3) ) ) ) )
lowercase : List[Any] =np.random.randn(3 , 4 , 5 )
lowercase : Optional[int] =jnp.array(UpperCAmelCase__ )
self.assertTrue(np.allclose(reshape(UpperCAmelCase__ , (12, 5) ) , np.asarray(reshape(UpperCAmelCase__ , (12, 5) ) ) ) )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Union[str, Any] =np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(UpperCAmelCase__ ) , np.squeeze(UpperCAmelCase__ ) ) )
lowercase : Optional[int] =np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(UpperCAmelCase__ , axis=2 ) , np.squeeze(UpperCAmelCase__ , axis=2 ) ) )
@require_torch
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Any =np.random.randn(1 , 3 , 4 )
lowercase : Union[str, Any] =torch.tensor(UpperCAmelCase__ )
self.assertTrue(np.allclose(squeeze(UpperCAmelCase__ ) , squeeze(UpperCAmelCase__ ).numpy() ) )
lowercase : Any =np.random.randn(1 , 4 , 1 , 5 )
lowercase : Optional[int] =torch.tensor(UpperCAmelCase__ )
self.assertTrue(np.allclose(squeeze(UpperCAmelCase__ , axis=2 ) , squeeze(UpperCAmelCase__ , axis=2 ).numpy() ) )
@require_tf
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : Union[str, Any] =np.random.randn(1 , 3 , 4 )
lowercase : Tuple =tf.constant(UpperCAmelCase__ )
self.assertTrue(np.allclose(squeeze(UpperCAmelCase__ ) , squeeze(UpperCAmelCase__ ).numpy() ) )
lowercase : Optional[Any] =np.random.randn(1 , 4 , 1 , 5 )
lowercase : Optional[int] =tf.constant(UpperCAmelCase__ )
self.assertTrue(np.allclose(squeeze(UpperCAmelCase__ , axis=2 ) , squeeze(UpperCAmelCase__ , axis=2 ).numpy() ) )
@require_flax
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : List[Any] =np.random.randn(1 , 3 , 4 )
lowercase : int =jnp.array(UpperCAmelCase__ )
self.assertTrue(np.allclose(squeeze(UpperCAmelCase__ ) , np.asarray(squeeze(UpperCAmelCase__ ) ) ) )
lowercase : int =np.random.randn(1 , 4 , 1 , 5 )
lowercase : Optional[int] =jnp.array(UpperCAmelCase__ )
self.assertTrue(np.allclose(squeeze(UpperCAmelCase__ , axis=2 ) , np.asarray(squeeze(UpperCAmelCase__ , axis=2 ) ) ) )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : str =np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(UpperCAmelCase__ , axis=1 ) , np.expand_dims(UpperCAmelCase__ , axis=1 ) ) )
@require_torch
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : List[str] =np.random.randn(3 , 4 )
lowercase : Dict =torch.tensor(UpperCAmelCase__ )
self.assertTrue(np.allclose(expand_dims(UpperCAmelCase__ , axis=1 ) , expand_dims(UpperCAmelCase__ , axis=1 ).numpy() ) )
@require_tf
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Union[str, Any] =np.random.randn(3 , 4 )
lowercase : List[Any] =tf.constant(UpperCAmelCase__ )
self.assertTrue(np.allclose(expand_dims(UpperCAmelCase__ , axis=1 ) , expand_dims(UpperCAmelCase__ , axis=1 ).numpy() ) )
@require_flax
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Any =np.random.randn(3 , 4 )
lowercase : Dict =jnp.array(UpperCAmelCase__ )
self.assertTrue(np.allclose(expand_dims(UpperCAmelCase__ , axis=1 ) , np.asarray(expand_dims(UpperCAmelCase__ , axis=1 ) ) ) )
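
# A minimal sketch (assuming only `numpy` and `transformers` are installed) of
# the framework-agnostic helpers exercised above: the same call works on NumPy
# arrays and, when the corresponding framework is installed, on torch/tf/jax
# tensors as well.
import numpy as np
from transformers.utils import expand_dims, reshape, squeeze, transpose

x = np.random.randn(1, 3, 4)
print(transpose(x).shape)            # (4, 3, 1) -- axes reversed by default
print(reshape(x, (4, 3)).shape)      # (4, 3)
print(squeeze(x).shape)              # (3, 4) -- size-1 axes removed
print(expand_dims(x, axis=1).shape)  # (1, 1, 3, 4)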
| 702 |
'''simple docstring'''
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in the input strings, increment the count in the
    # corresponding entry for one string and decrement it for the other;
    # true anagrams cancel out exactly.
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()
    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
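
# Illustrative checks (not in the original): the comparison is case- and
# whitespace-insensitive.
#   check_anagrams("Silent", "Listen")         -> True
#   check_anagrams("dormitory", "dirty room")  -> True
#   check_anagrams("hello", "world")           -> False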
| 88 | 0 |
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"
UpperCamelCase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : Dict=None , **UpperCAmelCase__ : Any ):
'''simple docstring'''
lowercase : List[Any] =None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __A , )
lowercase : List[str] =kwargs.pop('''feature_extractor''' )
lowercase : List[str] =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
lowercase : List[Any] =tokenizer
lowercase : List[str] =AutoTokenizer.from_pretrained('''gpt2''' )
lowercase : Dict =AutoTokenizer.from_pretrained('''bert-base-uncased''' )
super().__init__(__A , __A )
def __call__( self : List[str] , UpperCAmelCase__ : str=None , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : Dict=None , **UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
lowercase : Union[str, Any] =self.image_processor(__A , return_tensors=__A , **__A )
if text is not None:
lowercase : List[str] =self.char_tokenizer(__A , return_tensors=__A , **__A )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowercase : Any =encodings["input_ids"]
return inputs
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
lowercase : List[str] =sequences
lowercase : Optional[Any] =char_preds.size(0 )
lowercase : List[Any] =self._decode_helper(__A , '''char''' )
lowercase : Tuple =self._decode_helper(__A , '''bpe''' )
lowercase : int =self._decode_helper(__A , '''wp''' )
lowercase : Dict =[]
lowercase : Dict =[]
for i in range(__A ):
lowercase : List[str] =[char_scores[i], bpe_scores[i], wp_scores[i]]
lowercase : str =[char_strs[i], bpe_strs[i], wp_strs[i]]
lowercase : str =scores.index(max(__A ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
lowercase : Any ={}
lowercase : Union[str, Any] =final_strs
lowercase : Tuple =final_scores
lowercase : Optional[Any] =char_strs
lowercase : Tuple =bpe_strs
lowercase : Dict =wp_strs
return out
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
if format == DecodeType.CHARACTER:
lowercase : int =self.char_decode
lowercase : List[str] =1
lowercase : Optional[int] ="[s]"
elif format == DecodeType.BPE:
lowercase : Optional[int] =self.bpe_decode
lowercase : List[str] =2
lowercase : Dict ="#"
elif format == DecodeType.WORDPIECE:
lowercase : Union[str, Any] =self.wp_decode
lowercase : Tuple =102
lowercase : Optional[Any] ="[SEP]"
else:
raise ValueError(F'''Format {format} is not supported.''' )
lowercase : Union[str, Any] =[], []
lowercase : Tuple =pred_logits.size(0 )
lowercase : int =pred_logits.size(1 )
lowercase : Any =pred_logits.topk(1 , dim=-1 , largest=__A , sorted=__A )
lowercase : Tuple =preds_index.view(-1 , __A )[:, 1:]
lowercase : List[Any] =decoder(__A )
lowercase : Optional[Any] =torch.nn.functional.softmax(__A , dim=2 ).max(dim=2 )
lowercase : str =preds_max_prob[:, 1:]
for index in range(__A ):
lowercase : int =preds_str[index].find(__A )
lowercase : Union[str, Any] =preds_str[index][:pred_eos]
lowercase : Any =preds_index[index].cpu().tolist()
lowercase : Union[str, Any] =pred_index.index(__A ) if eos_token in pred_index else -1
lowercase : Union[str, Any] =preds_max_prob[index][: pred_eos_index + 1]
lowercase : List[str] =pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(__A )
conf_scores.append(__A )
return dec_strs, conf_scores
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Tuple ):
'''simple docstring'''
lowercase : Tuple =[seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(__A )]
return decode_strs
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Any ):
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(__A )
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : List[str] ):
'''simple docstring'''
lowercase : Optional[Any] =[seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(__A )]
return decode_strs
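
# Minimal usage sketch (assumption: the processor above is transformers'
# MgpstrProcessor; the checkpoint name and image path are illustrative).
from PIL import Image
from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor

processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")

image = Image.open("word_crop.png").convert("RGB")  # a cropped image of one word
pixel_values = processor(images=image, return_tensors="pt").pixel_values
outputs = model(pixel_values)
# batch_decode picks, per sample, the best string among the char/bpe/wp heads.
print(processor.batch_decode(outputs.logits)["generated_text"])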
| 703 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = None
lowerCamelCase_ = BloomTokenizerFast
lowerCamelCase_ = BloomTokenizerFast
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = 'tokenizer_file'
lowerCamelCase_ = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'}
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
super().setUp()
lowercase : Union[str, Any] =BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self : Union[str, Any] , **UpperCAmelCase__ : Any ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : str =self.get_rust_tokenizer()
lowercase : List[str] =['''The quick brown fox</s>''', '''jumps over the lazy dog</s>''']
lowercase : Any =[[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
lowercase : Any =tokenizer.batch_encode_plus(UpperCAmelCase__ )['''input_ids''']
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : int =tokenizer.batch_decode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : Any=6 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase : Optional[int] =self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
lowercase : Tuple ='''This is a simple input'''
lowercase : int =['''This is a simple input 1''', '''This is a simple input 2''']
lowercase : Optional[Any] =('''This is a simple input''', '''This is a pair''')
lowercase : int =[
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
try:
tokenizer_r.encode(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.batch_encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.encode(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.batch_encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
except ValueError:
self.fail('''Bloom Tokenizer should be able to deal with padding''' )
lowercase : Optional[int] =None # Hotfixing padding = None
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(
UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
# Pair input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(
UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Union[str, Any] =self.get_rust_tokenizer()
lowercase : Dict =load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=UpperCAmelCase__ )
lowercase : Union[str, Any] =next(iter(UpperCAmelCase__ ) )['''premise'''] # pick up one data
lowercase : int =list(sample_data.values() )
lowercase : Any =list(map(tokenizer.encode , UpperCAmelCase__ ) )
lowercase : List[str] =[tokenizer.decode(UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__ ) for x in output_tokens]
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings, which do not impose
        # any sequence length constraints. This test of the parent class will fail since it relies on the
        # maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
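
# A small round-trip sketch (assumes access to the "bigscience/tokenizer" repo
# used in setUp above); the ids shown come from the batch encode/decode test.
from transformers import BloomTokenizerFast

tok = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
ids = tok("The quick brown fox</s>")["input_ids"]
print(ids)              # [2175, 23714, 73173, 144252, 2]
print(tok.decode(ids))  # "The quick brown fox</s>"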
| 88 | 0 |
'''simple docstring'''
def binary_xor(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    # A result bit is 1 exactly when the two input bits differ; zero-pad
    # both operands to the same width before comparing bit by bit.
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
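
# Example (the function name above is a descriptive reconstruction; the original
# snippet left it obfuscated): 25 = 0b11001 and 32 = 0b100000, so the result is
# 0b111001 (decimal 57).
print(binary_xor(25, 32))  # 0b111001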
| 704 |
'''simple docstring'''
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
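
# Worked example (the function names are descriptive reconstructions): for an
# apparent power S = 100 VA at power factor 0.8, real power P = S * pf = 80 W
# and reactive power Q = S * sqrt(1 - pf**2) = 60 var.
print(real_power(100, 0.8))      # 80.0
print(reactive_power(100, 0.8))  # ~60.0 (up to floating-point rounding)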
| 88 | 0 |
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--onnx_model_path""",
default=None,
type=str,
required=True,
help="""Path to ONNX model: """,
)
parser.add_argument(
"""--output_dir""",
default=None,
type=str,
required=True,
help="""The output directory where the model checkpoints and predictions will be written.""",
)
# Other parameters
parser.add_argument(
"""--tokenizer_name""",
default="""""",
type=str,
required=True,
help="""Pretrained tokenizer name or path if not the same as model_name""",
)
parser.add_argument(
"""--version_2_with_negative""",
action="""store_true""",
help="""If true, the SQuAD examples contain some that do not have an answer.""",
)
parser.add_argument(
"""--null_score_diff_threshold""",
type=float,
default=0.0,
help="""If null_score - best_non_null is greater than the threshold predict null.""",
)
parser.add_argument(
"""--max_seq_length""",
default=384,
type=int,
help=(
"""The maximum total input sequence length after WordPiece tokenization. Sequences """
"""longer than this will be truncated, and sequences shorter than this will be padded."""
),
)
parser.add_argument(
"""--doc_stride""",
default=128,
type=int,
help="""When splitting up a long document into chunks, how much stride to take between chunks.""",
)
parser.add_argument("""--per_device_eval_batch_size""", default=8, type=int, help="""Batch size per GPU/CPU for evaluation.""")
parser.add_argument(
"""--n_best_size""",
default=20,
type=int,
help="""The total number of n-best predictions to generate in the nbest_predictions.json output file.""",
)
parser.add_argument(
"""--max_answer_length""",
default=30,
type=int,
help=(
"""The maximum length of an answer that can be generated. This is needed because the start """
"""and end predictions are not conditioned on one another."""
),
)
parser.add_argument("""--seed""", type=int, default=42, help="""random seed for initialization""")
parser.add_argument(
"""--dataset_name""",
type=str,
default=None,
required=True,
help="""The name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--dataset_config_name""",
type=str,
default=None,
help="""The configuration name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--preprocessing_num_workers""", type=int, default=4, help="""A csv or a json file containing the training data."""
)
parser.add_argument("""--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""")
parser.add_argument(
"""--fp16""",
action="""store_true""",
help="""Whether to use 16-bit (mixed) precision instead of 32-bit""",
)
parser.add_argument(
"""--int8""",
action="""store_true""",
help="""Whether to use INT8""",
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name."""
)
logger.info("""Training/evaluation parameters %s""", args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = """temp_engine/bert-fp32.engine"""
if args.fp16:
    engine_name = """temp_engine/bert-fp16.engine"""
if args.int8:
    engine_name = """temp_engine/bert-int8.engine"""
# import ONNX file
if not os.path.exists("""temp_engine"""):
os.makedirs("""temp_engine""")
UpperCamelCase_ = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, """rb""") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, """wb""") as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("""Evaluation requires a dataset name""")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
column_names = raw_datasets["""validation"""].column_names
question_column_name = """question""" if """question""" in column_names else column_names[0]
context_column_name = """context""" if """context""" in column_names else column_names[1]
answer_column_name = """answers""" if """answers""" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == """right"""
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []
    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]
    return tokenized_examples
eval_examples = raw_datasets["""validation"""]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="""Running tokenizer on validation dataset""",
)

data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(["""example_id""", """offset_mapping"""])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric("""squad_v2""" if args.version_2_with_negative else """squad""")
# Evaluation!
logger.info("""Loading ONNX model %s for evaluation""", args.onnx_model_path)
with open(engine_name, """rb""") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
# Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info("""***** Running Evaluation *****""")
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
logger.info(""" Evaluation done in total %f secs (%f sec per example)""", evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info("""Average Inference Time = {:.3f} ms""".format(total_time * 1000 / niter))
logger.info("""Total Inference Time = {:.3f} ms""".format(total_time * 1000))
logger.info("""Total Number of Inference = %d""", niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'''Evaluation metrics: {eval_metric}''')
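
# Illustrative invocation (the script file name is hypothetical; every flag
# shown is defined by the argparse setup above):
#
#   python evaluate_trt_qa.py \
#       --onnx_model_path model.onnx \
#       --output_dir ./trt_out \
#       --tokenizer_name bert-base-uncased \
#       --dataset_name squad \
#       --fp16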
| 705 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
def __init__( self : Union[str, Any] , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : List[str] ):
'''simple docstring'''
warnings.warn(
'''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use CLIPImageProcessor instead.''' , UpperCAmelCase__ , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
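
# Migration sketch: instead of the deprecated shim above, load the replacement
# class directly (the checkpoint name is illustrative).
from transformers import CLIPImageProcessor

image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")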
| 88 | 0 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
UpperCamelCase_ = get_tests_dir("""fixtures""")
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : Optional[Any] =mock.Mock()
lowercase : List[Any] =500
lowercase : List[str] ={}
lowercase : List[Any] =HTTPError
lowercase : Optional[int] ={}
# Download this model to make sure it's in the cache.
lowercase : Optional[int] =ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=a_ ) as mock_head:
lowercase : str =ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
            # This check makes sure we did call the fake head request
mock_head.assert_called()
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Optional[int] =ViTImageProcessor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
with self.assertRaises(a_ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowercase : Union[str, Any] =AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' )
lowercase : Optional[Any] =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' )
self.assertIsNotNone(a_ )
@is_staging_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@classmethod
def lowerCamelCase_ ( cls : Dict ):
'''simple docstring'''
lowercase : Union[str, Any] =TOKEN
HfFolder.save_token(a_ )
@classmethod
def lowerCamelCase_ ( cls : Optional[int] ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='''test-image-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' )
except HTTPError:
pass
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : Union[str, Any] =ViTImageProcessor.from_pretrained(a_ )
image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token )
lowercase : Tuple =ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(a_ , getattr(a_ , a_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
a_ , repo_id='''test-image-processor''' , push_to_hub=a_ , use_auth_token=self._token )
lowercase : Optional[Any] =ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(a_ , getattr(a_ , a_ ) )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : Optional[int] =ViTImageProcessor.from_pretrained(a_ )
image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token )
lowercase : Union[str, Any] =ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(a_ , getattr(a_ , a_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
a_ , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=a_ , use_auth_token=self._token )
lowercase : List[Any] =ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(a_ , getattr(a_ , a_ ) )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
CustomImageProcessor.register_for_auto_class()
lowercase : List[str] =CustomImageProcessor.from_pretrained(a_ )
image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , )
lowercase : Optional[int] =AutoImageProcessor.from_pretrained(
F'''{USER}/test-dynamic-image-processor''' , trust_remote_code=a_ )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
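
# Minimal sketch of the push-to-hub flow the tests above exercise (the repo id
# and token are placeholders, not real values):
#
#   from transformers import ViTImageProcessor
#
#   image_processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   image_processor.push_to_hub("my-user/my-image-processor", use_auth_token="hf_...")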
| 706 |
'''simple docstring'''
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
UpperCamelCase_ = parser.parse_args()
if args.model_type == "roberta":
UpperCamelCase_ = RobertaForMaskedLM.from_pretrained(args.model_name)
UpperCamelCase_ = """roberta"""
elif args.model_type == "gpt2":
UpperCamelCase_ = GPTaLMHeadModel.from_pretrained(args.model_name)
UpperCamelCase_ = """transformer"""
UpperCamelCase_ = model.state_dict()
UpperCamelCase_ = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
UpperCamelCase_ = state_dict[f'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
UpperCamelCase_ = f'''{prefix}.embeddings.{w}.weight'''
UpperCamelCase_ = state_dict[param_name]
for w in ["weight", "bias"]:
UpperCamelCase_ = f'''{prefix}.embeddings.LayerNorm.{w}'''
UpperCamelCase_ = state_dict[param_name]
# Transformer Blocks #
    std_idx = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[
f'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
]
UpperCamelCase_ = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias''']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
]
std_idx += 1
    # Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
UpperCamelCase_ = state_dict[f'''{layer}''']
if args.vocab_transform:
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[f'''lm_head.dense.{w}''']
UpperCamelCase_ = state_dict[f'''lm_head.layer_norm.{w}''']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[f'''{prefix}.ln_f.{w}''']
UpperCamelCase_ = state_dict["""lm_head.weight"""]
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
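
# Illustrative invocation (the script file name is hypothetical; the flags match
# the argparse definitions above):
#
#   python extract_distilled_layers.py \
#       --model_type roberta \
#       --model_name roberta-large \
#       --dump_checkpoint serialization_dir/tf_roberta_048131723.pth \
#       --vocab_transform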
| 88 | 0 |
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __SCREAMING_SNAKE_CASE :
def __init__( self : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : Dict=3 , UpperCAmelCase__ : Tuple=4 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : Optional[Any]=7 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : int=99 , UpperCAmelCase__ : str=36 , UpperCAmelCase__ : Any=3 , UpperCAmelCase__ : Optional[int]=4 , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : List[str]="gelu" , UpperCAmelCase__ : str=0.1 , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : int=512 , UpperCAmelCase__ : Union[str, Any]=16 , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : List[str]=0.02 , UpperCAmelCase__ : Dict=6 , UpperCAmelCase__ : Dict=6 , UpperCAmelCase__ : str=3 , UpperCAmelCase__ : Optional[int]=4 , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Tuple=1000 , ):
'''simple docstring'''
lowercase : Any =parent
lowercase : int =batch_size
lowercase : Tuple =num_channels
lowercase : Optional[Any] =image_size
lowercase : int =patch_size
lowercase : int =text_seq_length
lowercase : List[Any] =is_training
lowercase : Optional[Any] =use_input_mask
lowercase : Tuple =use_token_type_ids
lowercase : Optional[Any] =use_labels
lowercase : Dict =vocab_size
lowercase : Optional[Any] =hidden_size
lowercase : List[str] =num_hidden_layers
lowercase : List[Any] =num_attention_heads
lowercase : str =intermediate_size
lowercase : Optional[int] =hidden_act
lowercase : Union[str, Any] =hidden_dropout_prob
lowercase : Any =attention_probs_dropout_prob
lowercase : str =max_position_embeddings
lowercase : Any =type_vocab_size
lowercase : Optional[Any] =type_sequence_label_size
lowercase : Any =initializer_range
lowercase : int =coordinate_size
lowercase : Optional[int] =shape_size
lowercase : Optional[int] =num_labels
lowercase : Optional[int] =num_choices
lowercase : List[Any] =scope
lowercase : str =range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
lowercase : Tuple =text_seq_length
lowercase : Optional[int] =(image_size // patch_size) ** 2 + 1
lowercase : Optional[Any] =self.text_seq_length + self.image_seq_length
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : Optional[Any] =ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
lowercase : Optional[int] =ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowercase : List[Any] =bbox[i, j, 3]
lowercase : List[str] =bbox[i, j, 1]
lowercase : int =t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowercase : Any =bbox[i, j, 2]
lowercase : str =bbox[i, j, 0]
lowercase : List[Any] =t
lowercase : Dict =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase : List[str] =None
if self.use_input_mask:
lowercase : List[Any] =random_attention_mask([self.batch_size, self.text_seq_length] )
lowercase : Optional[Any] =None
if self.use_token_type_ids:
lowercase : Tuple =ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
lowercase : List[str] =None
lowercase : Any =None
if self.use_labels:
lowercase : List[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : int =ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
lowercase : Dict =LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] ):
'''simple docstring'''
lowercase : Tuple =LayoutLMvaModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
# text + image
lowercase : List[Any] =model(__lowerCAmelCase , pixel_values=__lowerCAmelCase )
lowercase : List[str] =model(
__lowerCAmelCase , bbox=__lowerCAmelCase , pixel_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
lowercase : List[str] =model(__lowerCAmelCase , bbox=__lowerCAmelCase , pixel_values=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
lowercase : Union[str, Any] =model(__lowerCAmelCase , bbox=__lowerCAmelCase , pixel_values=__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
lowercase : Any =model(__lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
lowercase : Optional[Any] =model(pixel_values=__lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple ):
'''simple docstring'''
lowercase : Dict =self.num_labels
lowercase : Dict =LayoutLMvaForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowercase : Tuple =model(
__lowerCAmelCase , bbox=__lowerCAmelCase , pixel_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Any ):
'''simple docstring'''
lowercase : List[Any] =self.num_labels
lowercase : List[str] =LayoutLMvaForTokenClassification(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowercase : Dict =model(
__lowerCAmelCase , bbox=__lowerCAmelCase , pixel_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase : Tuple =LayoutLMvaForQuestionAnswering(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowercase : int =model(
__lowerCAmelCase , bbox=__lowerCAmelCase , pixel_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase_ = (
{'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel}
if is_torch_available()
else {}
)
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int ):
'''simple docstring'''
return True
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : Tuple =LayoutLMvaModelTester(self )
lowercase : List[str] =ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 )
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Any=False ):
'''simple docstring'''
lowercase : int =copy.deepcopy(__lowerCAmelCase )
if model_class in get_values(__lowerCAmelCase ):
lowercase : str ={
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(__lowerCAmelCase , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(__lowerCAmelCase ):
lowercase : Union[str, Any] =torch.ones(self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase )
elif model_class in get_values(__lowerCAmelCase ):
lowercase : str =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase )
lowercase : List[str] =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase )
elif model_class in [
*get_values(__lowerCAmelCase ),
]:
lowercase : Union[str, Any] =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase )
elif model_class in [
*get_values(__lowerCAmelCase ),
]:
lowercase : Union[str, Any] =torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=__lowerCAmelCase , )
return inputs_dict
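# Note (added for clarity): the branches above build dummy labels whose shape
# matches the head under test — one label per example for sequence
# classification, start/end positions for question answering, and one label
# per text token for token classification.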
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : Dict =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase : Tuple =type
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase )
@slow
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : List[str] =LayoutLMvaModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def _lowerCAmelCase ( ) -> Optional[Any]:
lowercase : str =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=__lowerCAmelCase ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : List[Any] =LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(__lowerCAmelCase )
lowercase : Tuple =self.default_image_processor
lowercase : Any =prepare_img()
lowercase : str =image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).pixel_values.to(__lowerCAmelCase )
lowercase : str =torch.tensor([[1, 2]] )
lowercase : str =torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
lowercase : Optional[int] =model(
input_ids=input_ids.to(__lowerCAmelCase ) , bbox=bbox.to(__lowerCAmelCase ) , pixel_values=pixel_values.to(__lowerCAmelCase ) , )
# verify the logits
lowercase : Optional[int] =torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , __lowerCAmelCase )
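# Note (added for clarity): the 199 positions are the 2 text tokens plus the
# 197 visual tokens LayoutLMv3 appends (a 14x14 patch grid for a 224x224 input
# with 16x16 patches, plus one visual [CLS] token).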
lowercase : Optional[Any] =torch.tensor(
[[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=1E-4 ) )
| 707 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def _lowerCAmelCase ( __magic_name__ : Dict ) -> Dict:
for param in module.parameters():
lowercase : List[str] =False
def _lowerCAmelCase ( ) -> List[str]:
lowercase : Dict ='''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowercase : Optional[int] ='''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def _lowerCAmelCase ( __magic_name__ : Union[str, Any] ) -> str:
lowercase : Optional[int] =plt.imshow(__magic_name__ )
fig.axes.get_xaxis().set_visible(__magic_name__ )
fig.axes.get_yaxis().set_visible(__magic_name__ )
plt.show()
def _lowerCAmelCase ( ) -> List[Any]:
lowercase : Any =datetime.now()
lowercase : Dict =current_time.strftime('''%H:%M:%S''' )
return timestamp
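# Usage sketch (added, hedged): typical glue for the helpers above; the readable
# names get_device / get_timestamp are assumptions for the obfuscated
# definitions and are not part of the original sample.
# device = get_device()
# print(f"[{get_timestamp()}] running on {device}")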
| 88 | 0 |
'''simple docstring'''
def _lowerCAmelCase ( __magic_name__ : int ) -> int:
if not isinstance(__magic_name__ , int ):
raise TypeError('''only integers accepted as input''' )
else:
num_string : str =str(abs(__magic_name__ ) )
num_transpositions : list =[list(num_string ) for _ in range(len(num_string ) )]
for index in range(len(num_string ) ):
num_transpositions[index].pop(index )
return max(
int(''''''.join(transposition ) ) for transposition in num_transpositions )
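# Worked example (added): for 120 the single-digit deletions are 20, 10 and 12,
# so the function returns 20; the abs() above makes -120 give the same answer.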
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 708 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _lowerCAmelCase ( ) -> List[Any]:
lowercase : Tuple =HfArgumentParser(__magic_name__ )
lowercase : Union[str, Any] =parser.parse_args_into_dataclasses()[0]
lowercase : Any =TensorFlowBenchmark(args=__magic_name__ )
try:
lowercase : List[Any] =parser.parse_args_into_dataclasses()[0]
except ValueError as e:
lowercase : List[Any] ='''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
lowercase : Any =''' '''.join(str(__magic_name__ ).split(''' ''' )[:-1] )
lowercase : Optional[Any] =''''''
lowercase : List[str] =eval(str(__magic_name__ ).split(''' ''' )[-1] )
lowercase : Optional[Any] =[]
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(__magic_name__ )
if len(__magic_name__ ) > 0:
lowercase : int =full_error_msg + begin_error_msg + str(__magic_name__ )
raise ValueError(__magic_name__ )
benchmark.run()
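# CLI sketch (added, hedged): this entry point is typically invoked as e.g.
#   python run_benchmark_tf.py --models bert-base-cased --batch_sizes 8 --sequence_lengths 128
# The flag names follow TensorFlowBenchmarkArguments and are best-effort
# assumptions, not taken from this sample.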
if __name__ == "__main__":
main()
| 88 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''EleutherAI/gpt-j-6B''': '''https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json''',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class __SCREAMING_SNAKE_CASE ( a__ ):
lowerCamelCase_ = """gptj"""
lowerCamelCase_ = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : str , UpperCAmelCase__ : Tuple=50400 , UpperCAmelCase__ : str=2048 , UpperCAmelCase__ : Optional[int]=4096 , UpperCAmelCase__ : str=28 , UpperCAmelCase__ : Tuple=16 , UpperCAmelCase__ : Optional[int]=64 , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : int="gelu_new" , UpperCAmelCase__ : Optional[int]=0.0 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : List[str]=0.0 , UpperCAmelCase__ : List[Any]=1E-5 , UpperCAmelCase__ : List[str]=0.02 , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Union[str, Any]=50256 , UpperCAmelCase__ : str=50256 , UpperCAmelCase__ : Optional[int]=False , **UpperCAmelCase__ : int , ):
'''simple docstring'''
lowercase : int =vocab_size
lowercase : Tuple =n_positions
lowercase : List[Any] =n_embd
lowercase : int =n_layer
lowercase : Union[str, Any] =n_head
lowercase : Optional[int] =n_inner
lowercase : Any =rotary_dim
lowercase : Any =activation_function
lowercase : Optional[int] =resid_pdrop
lowercase : Optional[int] =embd_pdrop
lowercase : int =attn_pdrop
lowercase : Optional[Any] =layer_norm_epsilon
lowercase : Optional[int] =initializer_range
lowercase : Union[str, Any] =use_cache
lowercase : Union[str, Any] =bos_token_id
lowercase : Dict =eos_token_id
super().__init__(
bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , tie_word_embeddings=lowerCAmelCase__ , **lowerCAmelCase__ )
class __SCREAMING_SNAKE_CASE ( a__ ):
def __init__( self : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : str = "default" , UpperCAmelCase__ : Optional[Any] = None , UpperCAmelCase__ : Optional[int] = False , ):
'''simple docstring'''
super().__init__(lowerCAmelCase__ , task=lowerCAmelCase__ , patching_specs=lowerCAmelCase__ , use_past=lowerCAmelCase__ )
if not getattr(self._config , '''pad_token_id''' , lowerCAmelCase__ ):
# TODO: how to do that better?
lowercase : Optional[Any] =0
@property
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : Union[str, Any] =OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase__ , direction='''inputs''' )
lowercase : Tuple ={0: "batch", 1: "past_sequence + sequence"}
else:
lowercase : List[Any] ={0: "batch", 1: "sequence"}
return common_inputs
@property
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return self._config.n_layer
@property
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return self._config.n_head
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict = -1 , UpperCAmelCase__ : str = -1 , UpperCAmelCase__ : Tuple = False , UpperCAmelCase__ : Dict = None , ):
'''simple docstring'''
lowercase : List[Any] =super(lowerCAmelCase__ , self ).generate_dummy_inputs(
lowerCAmelCase__ , batch_size=lowerCAmelCase__ , seq_length=lowerCAmelCase__ , is_pair=lowerCAmelCase__ , framework=lowerCAmelCase__ )
# We need to order the input in the way they appears in the forward()
lowercase : Dict =OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowercase : Optional[int] =common_inputs["input_ids"].shape
# Not using the same length for past_key_values
lowercase : Optional[int] =seqlen + 2
lowercase : Optional[int] =(
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowercase : List[Any] =[
(torch.zeros(lowerCAmelCase__ ), torch.zeros(lowerCAmelCase__ )) for _ in range(self.num_layers )
]
lowercase : List[str] =common_inputs["attention_mask"]
if self.use_past:
lowercase : List[str] =ordered_inputs["attention_mask"].dtype
lowercase : Union[str, Any] =torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(lowerCAmelCase__ , lowerCAmelCase__ , dtype=lowerCAmelCase__ )] , dim=1 )
return ordered_inputs
@property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return 13
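# Export sketch (added, hedged): how an OnnxConfigWithPast subclass like the
# one above is usually driven; the name GPTJOnnxConfig is an assumption for
# the obfuscated class.
# onnx_config = GPTJOnnxConfig(model.config, task="default", use_past=True)
# dummy_inputs = onnx_config.generate_dummy_inputs(
#     tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH)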
| 709 |
'''simple docstring'''
from __future__ import annotations
def _lowerCAmelCase ( __magic_name__ : list[list[int]] ) -> bool:
lowercase : str =len(__magic_name__ )
# We need to create solution object to save path.
lowercase : int =[[0 for _ in range(__magic_name__ )] for _ in range(__magic_name__ )]
lowercase : List[Any] =run_maze(__magic_name__ , 0 , 0 , __magic_name__ )
if solved:
print('''\n'''.join(str(__magic_name__ ) for row in solutions ) )
else:
print('''No solution exists!''' )
return solved
def _lowerCAmelCase ( __magic_name__ : list[list[int]] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : list[list[int]] ) -> bool:
lowercase : Optional[int] =len(__magic_name__ )
# Final check point.
if i == j == (size - 1):
lowercase : Optional[int] =1
return True
lowercase : Optional[int] =(not i < 0) and (not j < 0) # Check lower bounds
lowercase : Tuple =(i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
lowercase : Union[str, Any] =(not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
lowercase : Union[str, Any] =1
# check for directions
if (
run_maze(__magic_name__ , i + 1 , __magic_name__ , __magic_name__ )
or run_maze(__magic_name__ , __magic_name__ , j + 1 , __magic_name__ )
or run_maze(__magic_name__ , i - 1 , __magic_name__ , __magic_name__ )
or run_maze(__magic_name__ , __magic_name__ , j - 1 , __magic_name__ )
):
return True
lowercase : str =0
return False
return False
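# Worked example (added): 0 marks an open cell and 1 a wall; the 4x4 maze
# below is solvable via (0,0)->(1,0)->(1,1)->(2,1)->(3,1)->(3,2)->(3,3).
# maze = [[0, 1, 0, 0],
#         [0, 0, 0, 1],
#         [1, 0, 1, 0],
#         [1, 0, 0, 0]]
# solve(maze)  # hypothetical name for the first (solver) function above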
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCAmelCase__ )
class __SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ):
lowerCamelCase_ = field(default='language-modeling' , metadata={'include_in_asdict_even_if_is_default': True} )
lowerCamelCase_ = Features({'text': Value('string' )} )
lowerCamelCase_ = Features({} )
lowerCamelCase_ = 'text'
@property
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
return {self.text_column: "text"}
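# Usage sketch (added, hedged): task templates like the one above expose a
# column mapping so a dataset can be renamed into the canonical schema;
# the class name LanguageModeling, and that the property corresponds to
# datasets' column_mapping, are assumptions about the obfuscated definition.
# template = LanguageModeling(text_column="content")
# dataset = dataset.rename_columns(template.column_mapping)  # {"content": "text"}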
| 710 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ):
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
lowercase : Any =DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )
@torch.no_grad()
def __call__( self : List[Any] , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : int = 50 , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , ):
'''simple docstring'''
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , UpperCAmelCase__ ):
lowercase : Optional[int] =(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
lowercase : Optional[int] =(batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and len(UpperCAmelCase__ ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(UpperCAmelCase__ )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowercase : str =randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(UpperCAmelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowercase : Dict =self.unet(UpperCAmelCase__ , UpperCAmelCase__ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
lowercase : Dict =self.scheduler.step(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , eta=UpperCAmelCase__ , use_clipped_model_output=UpperCAmelCase__ , generator=UpperCAmelCase__ ).prev_sample
lowercase : Optional[Any] =(image / 2 + 0.5).clamp(0 , 1 )
lowercase : Optional[Any] =image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase : List[str] =self.numpy_to_pil(UpperCAmelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCAmelCase__ )
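# Usage sketch (added, hedged): running an unconditional DDIM-style pipeline;
# the class name and checkpoint id are assumptions, not part of this sample.
# pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32").to("cuda")
# image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]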
| 88 | 0 |
'''simple docstring'''
import collections
import importlib.util
import os
import re
from pathlib import Path
UpperCamelCase_ = """src/transformers"""
# Matches is_xxx_available()
UpperCamelCase_ = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
UpperCamelCase_ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
UpperCamelCase_ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
UpperCamelCase_ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
UpperCamelCase_ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
UpperCamelCase_ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
UpperCamelCase_ = re.compile(r"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
UpperCamelCase_ = re.compile(r"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
UpperCamelCase_ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
UpperCamelCase_ = re.compile(r"""^\s*try:""")
# Catches a line with else:
UpperCamelCase_ = re.compile(r"""^\s*else:""")
def _lowerCAmelCase ( __magic_name__ : int ) -> str:
if _re_test_backend.search(_snake_case ) is None:
return None
lowercase : List[Any] =[b[0] for b in _re_backend.findall(_snake_case )]
backends.sort()
return "_and_".join(_snake_case )
def _lowerCAmelCase ( __magic_name__ : Dict ) -> Optional[int]:
with open(_snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowercase : List[Any] =f.readlines()
lowercase : Optional[int] =0
while line_index < len(_snake_case ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(_snake_case ):
return None
# First grab the objects without a specific backend in _import_structure
lowercase : str =[]
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
lowercase : int =lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(_snake_case ):
lowercase : Optional[Any] =_re_one_line_import_struct.search(_snake_case ).groups()[0]
lowercase : Any =re.findall(r'''\[([^\]]+)\]''' , _snake_case )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
lowercase : List[str] =_re_import_struct_key_value.search(_snake_case )
if single_line_import_search is not None:
lowercase : Union[str, Any] =[obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(_snake_case ) > 0]
objects.extend(_snake_case )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
lowercase : List[str] ={'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
lowercase : List[Any] =find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowercase : Optional[int] =None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowercase : Optional[int] =[]
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
lowercase : List[Any] =lines[line_index]
if _re_import_struct_add_one.search(_snake_case ) is not None:
objects.append(_re_import_struct_add_one.search(_snake_case ).groups()[0] )
elif _re_import_struct_add_many.search(_snake_case ) is not None:
lowercase : str =_re_import_struct_add_many.search(_snake_case ).groups()[0].split(''', ''' )
lowercase : Dict =[obj[1:-1] for obj in imports if len(_snake_case ) > 0]
objects.extend(_snake_case )
elif _re_between_brackets.search(_snake_case ) is not None:
lowercase : Union[str, Any] =_re_between_brackets.search(_snake_case ).groups()[0].split(''', ''' )
lowercase : List[Any] =[obj[1:-1] for obj in imports if len(_snake_case ) > 0]
objects.extend(_snake_case )
elif _re_quote_object.search(_snake_case ) is not None:
objects.append(_re_quote_object.search(_snake_case ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
lowercase : Tuple =objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowercase : str =[]
while (
line_index < len(_snake_case )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
lowercase : Any =lines[line_index]
lowercase : Optional[int] =_re_import.search(_snake_case )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowercase : Dict ={'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(_snake_case ):
# If the line is an if is_backend_available, we grab all objects associated.
lowercase : Union[str, Any] =find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowercase : List[str] =None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowercase : Union[str, Any] =[]
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
lowercase : Optional[Any] =lines[line_index]
lowercase : int =_re_import.search(_snake_case )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
lowercase : str =objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> List[str]:
def find_duplicates(__magic_name__ : Optional[Any] ):
return [k for k, v in collections.Counter(_snake_case ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
lowercase : List[Any] =[]
for key in import_dict_objects.keys():
lowercase : int =find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
lowercase : str =find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
lowercase : List[str] ='''base imports''' if key == '''none''' else f'''{key} backend'''
errors.append(f'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
def _lowerCAmelCase ( ) -> List[str]:
lowercase : int =[]
for root, _, files in os.walk(_snake_case ):
if "__init__.py" in files:
lowercase : List[Any] =os.path.join(_snake_case , '''__init__.py''' )
lowercase : Any =parse_init(_snake_case )
if objects is not None:
lowercase : Dict =analyze_results(*_snake_case )
if len(_snake_case ) > 0:
lowercase : Any =f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append('''\n'''.join(_snake_case ) )
if len(_snake_case ) > 0:
raise ValueError('''\n\n'''.join(_snake_case ) )
def _lowerCAmelCase ( ) -> Any:
lowercase : str =[]
for path, directories, files in os.walk(_snake_case ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(_snake_case )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(_snake_case ) / folder).glob('''*.py''' ) ) ) == 0:
continue
lowercase : Optional[int] =str((Path(_snake_case ) / folder).relative_to(_snake_case ) )
lowercase : str =short_path.replace(os.path.sep , '''.''' )
submodules.append(_snake_case )
for fname in files:
if fname == "__init__.py":
continue
lowercase : Any =str((Path(_snake_case ) / fname).relative_to(_snake_case ) )
lowercase : Optional[int] =short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(_snake_case )
return submodules
UpperCamelCase_ = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
]
def _lowerCAmelCase ( ) -> str:
# This is to make sure the transformers module imported is the one in the repo.
lowercase : Union[str, Any] =importlib.util.spec_from_file_location(
'''transformers''' , os.path.join(_snake_case , '''__init__.py''' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
lowercase : Any =spec.loader.load_module()
lowercase : List[str] =[
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(_snake_case ) > 0:
lowercase : Tuple ='''\n'''.join(f'''- {module}''' for module in module_not_registered )
raise ValueError(
'''The following submodules are not properly registered in the main init of Transformers:\n'''
f'''{list_of_modules}\n'''
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
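# Invocation sketch (added, hedged): in the transformers repo this check is
# conventionally run from the repository root as
#   python utils/check_inits.py
# (the path is an assumption based on the repo layout, not stated in this sample).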
| 711 |
'''simple docstring'''
import argparse
import copy
def _lowerCAmelCase ( __magic_name__ : List[str] ) -> Union[str, Any]:
lowercase : int ={}
with open(__magic_name__ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
lowercase : List[str] =[]
_list.append([line.split()[1], line.split()[2]] )
lowercase : Tuple =_list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
lowercase : List[Any] =[]
_list.append([line.split()[0], line.split()[2]] )
lowercase : Union[str, Any] =_list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def _lowerCAmelCase ( __magic_name__ : Optional[int] , __magic_name__ : List[Any] ) -> str:
with open(__magic_name__ ) as f:
lowercase : Optional[int] =f.read(1 )
lowercase : List[Any] =start_node
lowercase : List[Any] =[]
lowercase : str =start_node
lowercase : str =0
while visiting not in first_solution:
lowercase : Optional[int] =10000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(__magic_name__ ) and k[0] not in first_solution:
lowercase : List[Any] =k[1]
lowercase : str =k[0]
first_solution.append(__magic_name__ )
lowercase : Any =distance_of_first_solution + int(__magic_name__ )
lowercase : Optional[int] =best_node
first_solution.append(__magic_name__ )
lowercase : str =0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
lowercase : str =(
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 10000
)
return first_solution, distance_of_first_solution
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : Any ) -> Tuple:
lowercase : Tuple =[]
for n in solution[1:-1]:
lowercase : Dict =solution.index(__magic_name__ )
for kn in solution[1:-1]:
lowercase : Tuple =solution.index(__magic_name__ )
if n == kn:
continue
lowercase : Union[str, Any] =copy.deepcopy(__magic_name__ )
lowercase : Optional[int] =kn
lowercase : List[Any] =n
lowercase : List[Any] =0
for k in _tmp[:-1]:
lowercase : Optional[int] =_tmp[_tmp.index(__magic_name__ ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
lowercase : Optional[int] =distance + int(i[1] )
_tmp.append(__magic_name__ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
lowercase : Union[str, Any] =len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda __magic_name__ : __magic_name__[index_of_last_item_in_the_list] )
return neighborhood_of_solution
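# Note (added): each neighbour above is the current tour with two interior
# cities swapped, with its total distance appended as the last element;
# sorting by that element puts the cheapest candidate move first.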
def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : Dict ) -> Union[str, Any]:
lowercase : str =1
lowercase : List[Any] =first_solution
lowercase : Any =[]
lowercase : str =distance_of_first_solution
lowercase : str =solution
while count <= iters:
lowercase : Union[str, Any] =find_neighborhood(__magic_name__ , __magic_name__ )
lowercase : Dict =0
lowercase : int =neighborhood[index_of_best_solution]
lowercase : Optional[int] =len(__magic_name__ ) - 1
lowercase : List[Any] =False
while not found:
lowercase : List[Any] =0
while i < len(__magic_name__ ):
if best_solution[i] != solution[i]:
lowercase : List[str] =best_solution[i]
lowercase : Dict =solution[i]
break
lowercase : Any =i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
lowercase : str =True
lowercase : int =best_solution[:-1]
lowercase : Any =neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
lowercase : Optional[int] =cost
lowercase : str =solution
else:
lowercase : Optional[int] =index_of_best_solution + 1
lowercase : List[Any] =neighborhood[index_of_best_solution]
if len(__magic_name__ ) >= size:
tabu_list.pop(0 )
lowercase : Optional[int] =count + 1
return best_solution_ever, best_cost
def _lowerCAmelCase ( __magic_name__ : str=None ) -> Tuple:
lowercase : List[str] =generate_neighbours(args.File )
lowercase , lowercase : Optional[Any] =generate_first_solution(
args.File , __magic_name__ )
lowercase , lowercase : int =tabu_search(
__magic_name__ , __magic_name__ , __magic_name__ , args.Iterations , args.Size , )
print(f'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser(description="""Tabu Search""")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 88 | 0 |
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class __SCREAMING_SNAKE_CASE ( UpperCamelCase_ , unittest.TestCase ):
lowerCamelCase_ = WavaVecaPhonemeCTCTokenizer
lowerCamelCase_ = False
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
super().setUp()
lowercase : Optional[int] =(
'''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '''
'''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '''
'''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '''
'''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '''
'''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '''
'''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '''
'''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '''
'''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '''
'''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '''
'''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '''
'''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '''
'''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '''
'''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'''
).split(''' ''' )
lowercase : Tuple =dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
lowercase : Dict ={'''pad_token''': '''<pad>''', '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>'''}
lowercase : int =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + '''\n''' )
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str]=False , UpperCAmelCase__ : Dict=20 , UpperCAmelCase__ : Optional[int]=5 ):
'''simple docstring'''
lowercase : Union[str, Any] =[(i, tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase__ )) for i in range(len(UpperCamelCase__ ) )]
lowercase : str =list(filter(lambda UpperCAmelCase__ : [t[0]] == tokenizer.encode(t[1] , do_phonemize=UpperCamelCase__ ) , UpperCamelCase__ ) )
if max_length is not None and len(UpperCamelCase__ ) > max_length:
lowercase : int =toks[:max_length]
if min_length is not None and len(UpperCamelCase__ ) < min_length and len(UpperCamelCase__ ) > 0:
while len(UpperCamelCase__ ) < min_length:
lowercase : int =toks + toks
# toks_str = [t[1] for t in toks]
lowercase : Dict =[t[0] for t in toks]
# Ensure consistency
lowercase : List[Any] =tokenizer.decode(UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
if " " not in output_txt and len(UpperCamelCase__ ) > 1:
lowercase : List[str] =(
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase__ )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase__ )
)
if with_prefix_space:
lowercase : Tuple =''' ''' + output_txt
lowercase : Union[str, Any] =tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
return output_txt, output_ids
def lowerCamelCase_ ( self : Tuple , **UpperCAmelCase__ : Dict ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Tuple =self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
# check adding a single token
tokenizer.add_tokens('''xxx''' )
lowercase : Union[str, Any] =tokenizer('''m xxx ɪ''' , do_phonemize=UpperCamelCase__ ).input_ids
self.assertEqual(UpperCamelCase__ , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''] )
lowercase : List[str] =tokenizer('''m aaa ɪ ccc''' , do_phonemize=UpperCamelCase__ ).input_ids
self.assertEqual(UpperCamelCase__ , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
lowercase : Dict =tokenizer('''maɪ c''' , do_phonemize=UpperCamelCase__ ).input_ids
self.assertEqual(UpperCamelCase__ , [3, 200] ) # mai should be <unk> (=3)
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : List[Any] =self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
lowercase : int ='''Hello how are you'''
lowercase : List[Any] =tokenizer.phonemize(UpperCamelCase__ , phonemizer_lang='''en-us''' )
self.assertEqual(UpperCamelCase__ , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : Union[str, Any] =self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
lowercase : List[Any] ='''Hello how are you'''
lowercase : Optional[Any] =tokenizer.phonemize(UpperCamelCase__ , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(UpperCamelCase__ ).input_ids , tokenizer(UpperCamelCase__ , do_phonemize=UpperCamelCase__ ).input_ids )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : Union[str, Any] =self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
lowercase : Dict ='''Hello how are you'''
lowercase : int =tokenizer.phonemize(UpperCamelCase__ , phonemizer_lang='''en-us''' )
lowercase : Union[str, Any] =tokenizer.decode(tokenizer(UpperCamelCase__ ).input_ids )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : int =self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
lowercase : List[str] =[
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
lowercase : Any =tokenizer.decode(sample_ids[0] )
lowercase : Optional[Any] =tokenizer.batch_decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , batch_tokens[0] )
self.assertEqual(UpperCamelCase__ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : Dict =self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
lowercase : Union[str, Any] ='''Hello how are you'''
lowercase : Optional[int] =tokenizer.phonemize(UpperCamelCase__ , phonemizer_lang='''en-us''' )
self.assertEqual(UpperCamelCase__ , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''' )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Dict =self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
lowercase : Dict ='''Hello how are you'''
lowercase : List[Any] =tokenizer.phonemize(UpperCamelCase__ , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(UpperCamelCase__ ).input_ids , tokenizer(UpperCamelCase__ , do_phonemize=UpperCamelCase__ ).input_ids )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Tuple =self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
lowercase : str =[
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
lowercase : Any =tokenizer.decode(sample_ids[0] )
lowercase : Optional[Any] =tokenizer.batch_decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , batch_tokens[0] )
self.assertEqual(UpperCamelCase__ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
# decode with no word_del_token filter
lowercase : int =tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=UpperCamelCase__ )
lowercase : Tuple =tokenizer.batch_decode(UpperCamelCase__ , filter_word_delimiter_token=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , batch_tokens[0] )
self.assertEqual(UpperCamelCase__ , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''] )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : Union[str, Any] =self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
lowercase : int ='''Hello how are you'''
lowercase : str =tokenizer.phonemize(UpperCamelCase__ , phonemizer_lang='''en-us''' )
lowercase : int =tokenizer.decode(tokenizer(UpperCamelCase__ ).input_ids , filter_word_delimiter_token=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : str =self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
lowercase : Tuple ='''Hello how are you'''
lowercase : Optional[Any] =tokenizer.phonemize(UpperCamelCase__ , phonemizer_lang='''en-us''' )
lowercase : List[Any] =tokenizer.decode(tokenizer(UpperCamelCase__ ).input_ids , filter_word_delimiter_token=UpperCamelCase__ )
self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''' )] ).strip() , UpperCamelCase__ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Union[str, Any] =self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=UpperCamelCase__ )
lowercase : Optional[Any] ='''Hello how are you'''
lowercase : Optional[Any] =tokenizer(UpperCamelCase__ , phonemizer_lang='''en-us''' ).input_ids
lowercase : Tuple =tokenizer(UpperCamelCase__ , phonemizer_lang='''fr-fr''' ).input_ids
self.assertNotEqual(UpperCamelCase__ , UpperCamelCase__ )
lowercase : Tuple =tokenizer.decode(UpperCamelCase__ )
lowercase : List[str] =tokenizer.decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
self.assertEqual(UpperCamelCase__ , '''ɛ l o h aʊ a ʁ j u''' )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Optional[Any] =self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
lowercase : Dict ='''Hello how Are you'''
lowercase : str ='''hello how are you'''
lowercase : Tuple =tokenizer(UpperCamelCase__ ).input_ids
lowercase : Dict =tokenizer(UpperCamelCase__ ).input_ids
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : Tuple =self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
tokenizer.add_tokens(['''!''', '''?'''] )
tokenizer.add_special_tokens({'''cls_token''': '''$$$'''} )
# fmt: off
lowercase : Dict =[
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
lowercase : str =tokenizer.batch_decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , ['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''] )
@staticmethod
def lowerCamelCase_ ( UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
lowercase : Union[str, Any] =[d[key] for d in offsets]
return retrieved_list
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : Optional[int] =self.get_tokenizer(word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ
lowercase : Any =[11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
lowercase : Tuple =tokenizer.decode(UpperCamelCase__ , output_char_offsets=UpperCamelCase__ , filter_word_delimiter_token=UpperCamelCase__ )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''char_offsets''' in outputs )
self.assertTrue(isinstance(UpperCamelCase__ , UpperCamelCase__ ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : Dict =self.get_tokenizer(word_delimiter_token='''|''' )
def check_list_tuples_equal(UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ):
self.assertTrue(isinstance(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertTrue(isinstance(outputs_list[0] , UpperCamelCase__ ) )
# transform list to ModelOutput
lowercase : Any =WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch['''text'''] , outputs_batch_a['''text'''] )
def recursive_check(UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
[recursive_check(UpperCamelCase__ , UpperCamelCase__ ) for la, la in zip(UpperCamelCase__ , UpperCamelCase__ )]
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch['''char_offsets'''] , outputs_batch_a['''char_offsets'''] )
# fmt: off
lowercase : str =[
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
lowercase : List[Any] =tokenizer.batch_decode(UpperCamelCase__ , output_char_offsets=UpperCamelCase__ )
lowercase : Optional[int] =[tokenizer.decode(UpperCamelCase__ , output_char_offsets=UpperCamelCase__ ) for ids in sample_ids]
check_list_tuples_equal(UpperCamelCase__ , UpperCamelCase__ )
@unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
pass
@unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : Any =self.get_tokenizers(do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase : Union[str, Any] =tokenizer.vocab_size
lowercase : List[str] =len(UpperCamelCase__ )
self.assertNotEqual(UpperCamelCase__ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
lowercase : Dict =['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
lowercase : List[str] =tokenizer.add_tokens(UpperCamelCase__ )
lowercase : Optional[Any] =tokenizer.vocab_size
lowercase : Tuple =len(UpperCamelCase__ )
self.assertNotEqual(UpperCamelCase__ , 0 )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , len(UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , all_size + len(UpperCamelCase__ ) )
lowercase : Union[str, Any] =tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=UpperCamelCase__ )
self.assertGreaterEqual(len(UpperCamelCase__ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
lowercase : Union[str, Any] ={'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
lowercase : Optional[Any] =tokenizer.add_special_tokens(UpperCamelCase__ )
lowercase : Union[str, Any] =tokenizer.vocab_size
lowercase : List[Any] =len(UpperCamelCase__ )
self.assertNotEqual(UpperCamelCase__ , 0 )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , len(UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , all_size_a + len(UpperCamelCase__ ) )
lowercase : Optional[int] =tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=UpperCamelCase__ )
self.assertGreaterEqual(len(UpperCamelCase__ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : Optional[int] =self.get_tokenizers(fast=UpperCamelCase__ , do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase : Tuple =['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t''']
lowercase : Tuple =tokenizer.convert_tokens_to_string(UpperCamelCase__ )
self.assertIsInstance(output['''text'''] , UpperCamelCase__ )
| 712 |
'''simple docstring'''
def _lowerCAmelCase ( __magic_name__ : int = 1000000 ) -> int:
primes : set =set(range(3 , __magic_name__ , 2 ) )
primes.add(2 )
for p in range(3 , __magic_name__ , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , __magic_name__ , p ) ) )
phi : list =[float(n ) for n in range(__magic_name__ + 1 )]
for p in primes:
for n in range(p , __magic_name__ + 1 , p ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
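# Sanity check (added): phi(2..8) = 1, 2, 2, 4, 2, 6, 4, so with limit 8 the
# function above returns 21 — the 21 reduced proper fractions with denominator
# <= 8 quoted in Project Euler problem 72.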
| 88 | 0 |
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class __SCREAMING_SNAKE_CASE :
def __init__( self : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : str=13 , UpperCAmelCase__ : List[Any]=7 , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Tuple=False , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : List[str]=99 , UpperCAmelCase__ : List[Any]=32 , UpperCAmelCase__ : List[Any]=5 , UpperCAmelCase__ : Optional[Any]=4 , UpperCAmelCase__ : Dict=37 , UpperCAmelCase__ : Optional[Any]="gelu" , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : int=512 , UpperCAmelCase__ : List[str]=16 , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : int=3 , UpperCAmelCase__ : int=4 , UpperCAmelCase__ : Dict=None , ):
'''simple docstring'''
lowercase : Any =parent
lowercase : int =batch_size
lowercase : Any =seq_length
lowercase : int =is_training
lowercase : List[str] =use_input_mask
lowercase : Union[str, Any] =use_token_type_ids
lowercase : List[str] =use_labels
lowercase : Dict =vocab_size
lowercase : Optional[Any] =hidden_size
lowercase : str =num_hidden_layers
lowercase : Optional[Any] =num_attention_heads
lowercase : Tuple =intermediate_size
lowercase : List[str] =hidden_act
lowercase : Optional[Any] =hidden_dropout_prob
lowercase : List[str] =attention_probs_dropout_prob
lowercase : int =max_position_embeddings
lowercase : Optional[Any] =type_vocab_size
lowercase : int =type_sequence_label_size
lowercase : List[str] =initializer_range
lowercase : Dict =num_labels
lowercase : Dict =num_choices
lowercase : Any =scope
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Optional[int] =None
if self.use_input_mask:
lowercase : Tuple =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Union[str, Any] =None
if self.use_token_type_ids:
lowercase : int =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : Tuple =None
lowercase : Tuple =None
lowercase : Union[str, Any] =None
if self.use_labels:
lowercase : Union[str, Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : int =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Dict =ids_tensor([self.batch_size] , self.num_choices )
lowercase : str =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] ):
'''simple docstring'''
lowercase : Dict =BioGptModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowercase : Dict =model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
lowercase : List[Any] =model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , ):
'''simple docstring'''
lowercase : Tuple =BioGptForCausalLM(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowercase : List[str] =model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[Any] , *UpperCAmelCase__ : Tuple ):
'''simple docstring'''
lowercase : List[Any] =BioGptModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
# create attention mask
lowercase : Tuple =torch.ones(input_ids.shape , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
lowercase : List[str] =self.seq_length // 2
lowercase : List[Any] =0
# first forward pass
lowercase , lowercase : int =model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE ).to_tuple()
# create hypothetical next token and extend to next_input_ids
lowercase : Optional[Any] =ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
lowercase : List[Any] =ids_tensor((1,) , _SCREAMING_SNAKE_CASE ).item() + 1
lowercase : List[Any] =ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
lowercase : List[Any] =random_other_next_tokens
# append to next input_ids and attn_mask
lowercase : Tuple =torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase : Optional[Any] =torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )] , dim=1 , )
# get two different outputs
lowercase : Optional[int] =model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )['''last_hidden_state''']
lowercase : int =model(_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )['''last_hidden_state''']
# select random slice
lowercase : Union[str, Any] =ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase : Dict =output_from_no_past[:, -1, random_slice_idx].detach()
lowercase : List[str] =output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , *UpperCAmelCase__ : List[Any] ):
'''simple docstring'''
lowercase : Tuple =BioGptModel(config=_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE ).eval()
lowercase : Tuple =torch.ones(input_ids.shape , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
# first forward pass
lowercase : List[Any] =model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE )
lowercase , lowercase : Tuple =outputs.to_tuple()
# create hypothetical multiple next tokens and extend to next_input_ids
lowercase : Tuple =ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase : Optional[Any] =ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and attention mask
lowercase : str =torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase : Union[str, Any] =torch.cat([attention_mask, next_attn_mask] , dim=-1 )
lowercase : Any =model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )['''last_hidden_state''']
lowercase : List[str] =model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE )[
'''last_hidden_state'''
]
# select random slice
lowercase : str =ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase : Optional[int] =output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase : Tuple =output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] , *UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any]=False ):
'''simple docstring'''
lowercase : Tuple =BioGptForCausalLM(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
lowercase : Any =model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : str , *UpperCAmelCase__ : Tuple ):
'''simple docstring'''
lowercase : int =BioGptModel(_SCREAMING_SNAKE_CASE )
lowercase : int =model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : int , *UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
lowercase : List[str] =self.num_labels
lowercase : Tuple =BioGptForTokenClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowercase : List[Any] =model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Optional[Any] =self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase : Any =config_and_inputs
lowercase : Union[str, Any] ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
lowerCamelCase_ = (BioGptForCausalLM,) if is_torch_available() else ()
lowerCamelCase_ = (
{
'feature-extraction': BioGptModel,
'text-classification': BioGptForSequenceClassification,
'text-generation': BioGptForCausalLM,
'token-classification': BioGptForTokenClassification,
'zero-shot': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase_ = False
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : List[Any] =BioGptModelTester(self )
lowercase : Optional[int] =ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase : str =type
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*_SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*_SCREAMING_SNAKE_CASE , gradient_checkpointing=_SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*_SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*_SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*_SCREAMING_SNAKE_CASE )
@slow
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : int =BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(_SCREAMING_SNAKE_CASE )
lowercase : Optional[Any] =BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
lowercase : List[Any] ='''left'''
# Define PAD Token = EOS Token (BioGPT reuses its own EOS id for padding)
lowercase : Dict =tokenizer.eos_token
lowercase : int =model.config.eos_token_id
# use different length sentences to test batching
lowercase : Dict =[
'''Hello, my dog is a little''',
'''Today, I''',
]
lowercase : int =tokenizer(_SCREAMING_SNAKE_CASE , return_tensors='''pt''' , padding=_SCREAMING_SNAKE_CASE )
lowercase : int =inputs['''input_ids'''].to(_SCREAMING_SNAKE_CASE )
lowercase : Dict =model.generate(
input_ids=_SCREAMING_SNAKE_CASE , attention_mask=inputs['''attention_mask'''].to(_SCREAMING_SNAKE_CASE ) , )
lowercase : List[Any] =tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(_SCREAMING_SNAKE_CASE )
lowercase : Optional[Any] =model.generate(input_ids=_SCREAMING_SNAKE_CASE )
lowercase : Any =inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
lowercase : Dict =tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(_SCREAMING_SNAKE_CASE )
lowercase : List[Any] =model.generate(input_ids=_SCREAMING_SNAKE_CASE , max_length=model.config.max_length - num_paddings )
lowercase : Tuple =tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
lowercase : Dict =tokenizer.decode(output_non_padded[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE )
lowercase : Optional[Any] =tokenizer.decode(output_padded[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE )
lowercase : Optional[Any] =[
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , [non_padded_sentence, padded_sentence] )
@slow
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Optional[Any] =BioGptModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase , lowercase : str =self.model_tester.prepare_config_and_inputs_for_common()
lowercase : str =3
lowercase : Tuple =input_dict['''input_ids''']
lowercase : Any =input_ids.ne(1 ).to(_SCREAMING_SNAKE_CASE )
lowercase : Optional[int] =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase : Tuple =BioGptForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowercase : List[str] =model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase , lowercase : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
lowercase : Union[str, Any] =3
lowercase : Union[str, Any] ='''multi_label_classification'''
lowercase : Optional[Any] =input_dict['''input_ids''']
lowercase : Optional[int] =input_ids.ne(1 ).to(_SCREAMING_SNAKE_CASE )
lowercase : str =ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowercase : List[Any] =BioGptForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowercase : Dict =model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : List[str] =BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
lowercase : Tuple =torch.tensor([[2, 4805, 9, 656, 21]] )
lowercase : Dict =model(_SCREAMING_SNAKE_CASE )[0]
lowercase : Dict =42384
lowercase : Tuple =torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
lowercase : List[str] =torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
@slow
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : List[Any] =BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
lowercase : List[str] =BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(_SCREAMING_SNAKE_CASE )
torch.manual_seed(0 )
lowercase : str =tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE )
lowercase : Optional[Any] =model.generate(
**_SCREAMING_SNAKE_CASE , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=_SCREAMING_SNAKE_CASE , )
lowercase : List[Any] =tokenizer.decode(output_ids[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE )
lowercase : str =(
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
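# Editor's sketch (hedged): the plain inference pattern the slow tests above
# exercise, outside the unittest harness. The checkpoint name comes from the
# tests themselves; the generation settings below are illustrative choices,
# not values asserted anywhere in this file.
import torch
from transformers import BioGptForCausalLM, BioGptTokenizer
def biogpt_generate_demo() -> str:
    tokenizer = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
    model = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
    inputs = tokenizer('''COVID-19 is''', return_tensors='''pt''')
    with torch.no_grad():
        output_ids = model.generate(**inputs, max_length=32, num_beams=5, early_stopping=True)
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)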
| 713 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = BioGptTokenizer
lowerCamelCase_ = False
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase : List[str] =[
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
lowercase : Any =dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
lowercase : Union[str, Any] =['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
lowercase : Optional[int] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase : Union[str, Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(UpperCAmelCase__ ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(UpperCAmelCase__ ) )
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase : Dict ='''lower newer'''
lowercase : str ='''lower newer'''
return input_text, output_text
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : List[Any] =BioGptTokenizer(self.vocab_file , self.merges_file )
lowercase : Any ='''lower'''
lowercase : int =['''low''', '''er</w>''']
lowercase : Optional[Any] =tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Optional[int] =tokens + ['''<unk>''']
lowercase : Any =[14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ )
@slow
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Dict =BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
lowercase : List[str] =tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCAmelCase__ )
lowercase : Optional[int] =tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCAmelCase__ )
lowercase : str =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ )
lowercase : Optional[Any] =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
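# Editor's note (hedged): with the toy merges above ('''l o''', '''lo w''',
# '''e r</w>'''), the word '''lower''' reduces to the learned units
# ['''low''', '''er</w>'''] - exactly the split the BPE test in this class
# asserts - and '''<unk>''' (the last vocab entry, id 20) is appended to
# exercise the unknown-token path, giving the expected ids [14, 15, 20].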
| 88 | 0 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __SCREAMING_SNAKE_CASE ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
@register_to_config
def __init__( self : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : str , UpperCAmelCase__ : bool = False , ):
'''simple docstring'''
super().__init__()
lowercase : Tuple =nn.Embedding(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : List[str] =nn.Embedding(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Dict =False
lowercase : Optional[Any] =nn.Dropout(p=UpperCAmelCase__ )
lowercase : Optional[Any] =TaConfig(
vocab_size=UpperCAmelCase__ , d_model=UpperCAmelCase__ , num_heads=UpperCAmelCase__ , d_kv=UpperCAmelCase__ , d_ff=UpperCAmelCase__ , dropout_rate=UpperCAmelCase__ , feed_forward_proj=UpperCAmelCase__ , is_decoder=UpperCAmelCase__ , is_encoder_decoder=UpperCAmelCase__ , )
lowercase : Optional[int] =nn.ModuleList()
for lyr_num in range(UpperCAmelCase__ ):
lowercase : int =TaBlock(UpperCAmelCase__ )
self.encoders.append(UpperCAmelCase__ )
lowercase : Optional[Any] =TaLayerNorm(UpperCAmelCase__ )
lowercase : Tuple =nn.Dropout(p=UpperCAmelCase__ )
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict ):
'''simple docstring'''
lowercase : Union[str, Any] =self.token_embedder(UpperCAmelCase__ )
lowercase : Union[str, Any] =encoder_input_tokens.shape[1]
lowercase : Union[str, Any] =torch.arange(UpperCAmelCase__ , device=encoder_input_tokens.device )
x += self.position_encoding(UpperCAmelCase__ )
lowercase : List[str] =self.dropout_pre(UpperCAmelCase__ )
# invert the attention mask
lowercase : List[Any] =encoder_input_tokens.size()
lowercase : Dict =self.get_extended_attention_mask(UpperCAmelCase__ , UpperCAmelCase__ )
for lyr in self.encoders:
lowercase : Dict =lyr(UpperCAmelCase__ , UpperCAmelCase__ )[0]
lowercase : int =self.layer_norm(UpperCAmelCase__ )
return self.dropout_post(UpperCAmelCase__ ), encoder_inputs_mask
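# Editor's sketch (hedged): the module above mirrors the SpectrogramNotesEncoder
# from diffusers' spectrogram-diffusion pipeline. A minimal smoke test under
# that assumption follows; the import path and every hyperparameter value are
# illustrative and may differ between diffusers versions.
import torch
from diffusers.pipelines.spectrogram_diffusion import SpectrogramNotesEncoder  # assumed location
encoder = SpectrogramNotesEncoder(
    max_length=64, vocab_size=128, d_model=32, dropout_rate=0.1,
    num_layers=2, num_heads=2, d_kv=16, d_ff=64,
    feed_forward_proj='''gated-gelu''', is_decoder=False,
)
tokens = torch.randint(0, 128, (1, 64))
mask = torch.ones_like(tokens)
hidden_states, out_mask = encoder(tokens, mask)
print(hidden_states.shape)  # expected: torch.Size([1, 64, 32])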
| 714 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str=13 , UpperCAmelCase__ : Optional[Any]=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Dict=99 , UpperCAmelCase__ : str=32 , UpperCAmelCase__ : Optional[Any]=5 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : str="gelu" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Optional[Any]=512 , UpperCAmelCase__ : Any=16 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : int=4 , ):
'''simple docstring'''
lowercase : int =parent
lowercase : List[str] =batch_size
lowercase : str =seq_length
lowercase : Optional[Any] =is_training
lowercase : Union[str, Any] =use_attention_mask
lowercase : Optional[Any] =use_token_type_ids
lowercase : Tuple =use_labels
lowercase : List[str] =vocab_size
lowercase : List[str] =hidden_size
lowercase : Tuple =num_hidden_layers
lowercase : Any =num_attention_heads
lowercase : List[str] =intermediate_size
lowercase : Optional[Any] =hidden_act
lowercase : Dict =hidden_dropout_prob
lowercase : List[Any] =attention_probs_dropout_prob
lowercase : Optional[Any] =max_position_embeddings
lowercase : Tuple =type_vocab_size
lowercase : Optional[int] =type_sequence_label_size
lowercase : Optional[Any] =initializer_range
lowercase : Optional[int] =num_choices
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Union[str, Any] =None
if self.use_attention_mask:
lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Tuple =None
if self.use_token_type_ids:
lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : int =RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : List[Any] =self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase : str =config_and_inputs
lowercase : Optional[Any] ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : List[str] =self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase : Any =config_and_inputs
lowercase : List[str] =True
lowercase : Optional[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase : str =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = True
lowerCamelCase_ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : str =FlaxRobertaModelTester(self )
@slow
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowercase : Optional[int] =model_class_name.from_pretrained('''roberta-base''' , from_pt=UpperCAmelCase__ )
lowercase : List[Any] =model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase__ )
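# Editor's sketch (hedged): the smallest end-to-end use of the Flax model that
# the suite above exercises; it mirrors the slow test but adds a shape check.
# Requires flax/jax to be installed; checkpoint name is taken from the test.
from transformers import FlaxRobertaModel
flax_model = FlaxRobertaModel.from_pretrained('''roberta-base''')
flax_outputs = flax_model(np.ones((1, 4), dtype='''i4'''))
print(flax_outputs.last_hidden_state.shape)  # (1, 4, 768)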
| 88 | 0 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( ) -> Dict:
lowercase : int =10
lowercase : str =datasets.Features(
{
'''tokens''': datasets.Sequence(datasets.Value('''string''' ) ),
'''labels''': datasets.Sequence(datasets.ClassLabel(names=['''negative''', '''positive'''] ) ),
'''answers''': datasets.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
'''id''': datasets.Value('''int64''' ),
} )
lowercase : Any =datasets.Dataset.from_dict(
{
'''tokens''': [['''foo'''] * 5] * n,
'''labels''': [[1] * 5] * n,
'''answers''': [{'''answer_start''': [97], '''text''': ['''1976''']}] * 10,
'''id''': list(range(SCREAMING_SNAKE_CASE_ ) ),
} , features=SCREAMING_SNAKE_CASE_ , )
return dataset
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : Dict , __magic_name__ : str ) -> List[Any]:
lowercase : Union[str, Any] =str(tmp_path_factory.mktemp('''data''' ) / '''file.arrow''' )
dataset.map(cache_file_name=SCREAMING_SNAKE_CASE_ )
return filename
# FILE_CONTENT + files
UpperCamelCase_ = """\
Text data.
Second line of data."""
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : Union[str, Any] ) -> Dict:
lowercase : List[str] =tmp_path_factory.mktemp('''data''' ) / '''file.txt'''
lowercase : Optional[int] =FILE_CONTENT
with open(SCREAMING_SNAKE_CASE_ , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
return filename
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : Dict ) -> Dict:
import bza
lowercase : List[Any] =tmp_path_factory.mktemp('''data''' ) / '''file.txt.bz2'''
lowercase : str =bytes(SCREAMING_SNAKE_CASE_ , '''utf-8''' )
with bza.open(SCREAMING_SNAKE_CASE_ , '''wb''' ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : Tuple ) -> int:
import gzip
lowercase : Dict =str(tmp_path_factory.mktemp('''data''' ) / '''file.txt.gz''' )
lowercase : List[str] =bytes(SCREAMING_SNAKE_CASE_ , '''utf-8''' )
with gzip.open(SCREAMING_SNAKE_CASE_ , '''wb''' ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : List[Any] ) -> Any:
if datasets.config.LZ4_AVAILABLE:
import lza.frame
lowercase : Optional[int] =tmp_path_factory.mktemp('''data''' ) / '''file.txt.lz4'''
lowercase : List[Any] =bytes(SCREAMING_SNAKE_CASE_ , '''utf-8''' )
with lza.frame.open(SCREAMING_SNAKE_CASE_ , '''wb''' ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : Optional[Any] , __magic_name__ : List[str] ) -> int:
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
lowercase : str =tmp_path_factory.mktemp('''data''' ) / '''file.txt.7z'''
with pyazr.SevenZipFile(SCREAMING_SNAKE_CASE_ , '''w''' ) as archive:
archive.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ) )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Optional[int] ) -> Any:
import tarfile
lowercase : int =tmp_path_factory.mktemp('''data''' ) / '''file.txt.tar'''
with tarfile.TarFile(SCREAMING_SNAKE_CASE_ , '''w''' ) as f:
f.add(SCREAMING_SNAKE_CASE_ , arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ) )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : Any ) -> Union[str, Any]:
import lzma
lowercase : Tuple =tmp_path_factory.mktemp('''data''' ) / '''file.txt.xz'''
lowercase : Tuple =bytes(SCREAMING_SNAKE_CASE_ , '''utf-8''' )
with lzma.open(SCREAMING_SNAKE_CASE_ , '''wb''' ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Optional[int] ) -> Tuple:
import zipfile
lowercase : Tuple =tmp_path_factory.mktemp('''data''' ) / '''file.txt.zip'''
with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ) )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : Optional[Any] ) -> Tuple:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
lowercase : Tuple =tmp_path_factory.mktemp('''data''' ) / '''file.txt.zst'''
lowercase : int =bytes(SCREAMING_SNAKE_CASE_ , '''utf-8''' )
with zstd.open(SCREAMING_SNAKE_CASE_ , '''wb''' ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : Any ) -> int:
lowercase : Tuple =tmp_path_factory.mktemp('''data''' ) / '''file.xml'''
lowercase : List[Any] =textwrap.dedent(
'''\
<?xml version="1.0" encoding="UTF-8" ?>
<tmx version="1.4">
  <header segtype="sentence" srclang="ca" />
  <body>
    <tu>
      <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
      <tuv xml:lang="en"><seg>Content 1</seg></tuv>
    </tu>
    <tu>
      <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
      <tuv xml:lang="en"><seg>Content 2</seg></tuv>
    </tu>
    <tu>
      <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
      <tuv xml:lang="en"><seg>Content 3</seg></tuv>
    </tu>
    <tu>
      <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
      <tuv xml:lang="en"><seg>Content 4</seg></tuv>
    </tu>
    <tu>
      <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
      <tuv xml:lang="en"><seg>Content 5</seg></tuv>
    </tu>
  </body>
</tmx>''' )
with open(SCREAMING_SNAKE_CASE_ , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
return filename
UpperCamelCase_ = [
{"""col_1""": """0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """3""", """col_2""": 3, """col_3""": 3.0},
]
UpperCamelCase_ = [
{"""col_1""": """4""", """col_2""": 4, """col_3""": 4.0},
{"""col_1""": """5""", """col_2""": 5, """col_3""": 5.0},
]
UpperCamelCase_ = {
"""col_1""": ["""0""", """1""", """2""", """3"""],
"""col_2""": [0, 1, 2, 3],
"""col_3""": [0.0, 1.0, 2.0, 3.0],
}
UpperCamelCase_ = [
{"""col_3""": 0.0, """col_1""": """0""", """col_2""": 0},
{"""col_3""": 1.0, """col_1""": """1""", """col_2""": 1},
]
UpperCamelCase_ = [
{"""col_1""": """s0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """s1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """s2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """s3""", """col_2""": 3, """col_3""": 3.0},
]
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( ) -> str:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : int ) -> Optional[int]:
lowercase : List[Any] =datasets.Dataset.from_dict(SCREAMING_SNAKE_CASE_ )
lowercase : int =str(tmp_path_factory.mktemp('''data''' ) / '''dataset.arrow''' )
dataset.map(cache_file_name=SCREAMING_SNAKE_CASE_ )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : List[str] ) -> Union[str, Any]:
lowercase : Optional[Any] =str(tmp_path_factory.mktemp('''data''' ) / '''dataset.sqlite''' )
with contextlib.closing(sqlitea.connect(SCREAMING_SNAKE_CASE_ ) ) as con:
lowercase : Any =con.cursor()
cur.execute('''CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)''' )
for item in DATA:
cur.execute('''INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)''' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : str ) -> str:
lowercase : str =str(tmp_path_factory.mktemp('''data''' ) / '''dataset.csv''' )
with open(SCREAMING_SNAKE_CASE_ , '''w''' , newline='''''' ) as f:
lowercase : Optional[int] =csv.DictWriter(SCREAMING_SNAKE_CASE_ , fieldnames=['''col_1''', '''col_2''', '''col_3'''] )
writer.writeheader()
for item in DATA:
writer.writerow(SCREAMING_SNAKE_CASE_ )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : Any ) -> Optional[int]:
lowercase : List[str] =str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.csv''' )
with open(SCREAMING_SNAKE_CASE_ , '''w''' , newline='''''' ) as f:
lowercase : Optional[Any] =csv.DictWriter(SCREAMING_SNAKE_CASE_ , fieldnames=['''col_1''', '''col_2''', '''col_3'''] )
writer.writeheader()
for item in DATA:
writer.writerow(SCREAMING_SNAKE_CASE_ )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : List[str] ) -> int:
import bza
lowercase : Tuple =tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.bz2'''
with open(SCREAMING_SNAKE_CASE_ , '''rb''' ) as f:
lowercase : str =f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(SCREAMING_SNAKE_CASE_ , '''wb''' ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Optional[Any] ) -> Tuple:
lowercase : Union[str, Any] =tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ) )
f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ) )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : int ) -> Optional[int]:
lowercase : Optional[Any] =tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.basename(csv_path.replace('''.csv''' , '''.CSV''' ) ) )
f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.basename(csva_path.replace('''.csv''' , '''.CSV''' ) ) )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : Optional[Any] ) -> List[str]:
lowercase : str =tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.csv.zip'''
with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.join('''main_dir''' , os.path.basename(SCREAMING_SNAKE_CASE_ ) ) )
f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.join('''main_dir''' , os.path.basename(SCREAMING_SNAKE_CASE_ ) ) )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : List[str] ) -> str:
lowercase : Union[str, Any] =str(tmp_path_factory.mktemp('''data''' ) / '''dataset.parquet''' )
lowercase : Dict =pa.schema(
{
'''col_1''': pa.string(),
'''col_2''': pa.intaa(),
'''col_3''': pa.floataa(),
} )
with open(SCREAMING_SNAKE_CASE_ , '''wb''' ) as f:
lowercase : List[str] =pq.ParquetWriter(SCREAMING_SNAKE_CASE_ , schema=SCREAMING_SNAKE_CASE_ )
lowercase : Tuple =pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(SCREAMING_SNAKE_CASE_ ) )] for k in DATA[0]} , schema=SCREAMING_SNAKE_CASE_ )
writer.write_table(SCREAMING_SNAKE_CASE_ )
writer.close()
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : List[str] ) -> int:
lowercase : Union[str, Any] =str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
lowercase : List[Any] ={'''data''': DATA}
with open(SCREAMING_SNAKE_CASE_ , '''w''' ) as f:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : Tuple ) -> int:
lowercase : Dict =str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
lowercase : List[Any] ={'''data''': DATA_DICT_OF_LISTS}
with open(SCREAMING_SNAKE_CASE_ , '''w''' ) as f:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : Any ) -> Optional[int]:
lowercase : List[str] =str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl''' )
with open(SCREAMING_SNAKE_CASE_ , '''w''' ) as f:
for item in DATA:
f.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : str ) -> Optional[Any]:
lowercase : List[Any] =str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.jsonl''' )
with open(SCREAMING_SNAKE_CASE_ , '''w''' ) as f:
for item in DATA:
f.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : int ) -> List[Any]:
lowercase : Optional[int] =str(tmp_path_factory.mktemp('''data''' ) / '''dataset_312.jsonl''' )
with open(SCREAMING_SNAKE_CASE_ , '''w''' ) as f:
for item in DATA_312:
f.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : Optional[int] ) -> Optional[Any]:
lowercase : List[str] =str(tmp_path_factory.mktemp('''data''' ) / '''dataset-str.jsonl''' )
with open(SCREAMING_SNAKE_CASE_ , '''w''' ) as f:
for item in DATA_STR:
f.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : List[str] ) -> List[str]:
import gzip
lowercase : Dict =str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt.gz''' )
with open(SCREAMING_SNAKE_CASE_ , '''rb''' ) as orig_file:
with gzip.open(SCREAMING_SNAKE_CASE_ , '''wb''' ) as zipped_file:
zipped_file.writelines(SCREAMING_SNAKE_CASE_ )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : Optional[Any] , __magic_name__ : List[Any] ) -> Tuple:
import gzip
lowercase : str =str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.gz''' )
with open(SCREAMING_SNAKE_CASE_ , '''rb''' ) as orig_file:
with gzip.open(SCREAMING_SNAKE_CASE_ , '''wb''' ) as zipped_file:
zipped_file.writelines(SCREAMING_SNAKE_CASE_ )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Dict ) -> List[Any]:
lowercase : Union[str, Any] =tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.zip'''
with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ) )
f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ) )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : Any , __magic_name__ : int , __magic_name__ : Optional[Any] ) -> Tuple:
lowercase : int =tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.zip'''
with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.join('''nested''' , os.path.basename(SCREAMING_SNAKE_CASE_ ) ) )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Tuple ) -> List[Any]:
lowercase : List[str] =tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.jsonl.zip'''
with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.join('''main_dir''' , os.path.basename(SCREAMING_SNAKE_CASE_ ) ) )
f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.join('''main_dir''' , os.path.basename(SCREAMING_SNAKE_CASE_ ) ) )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : List[Any] ) -> str:
lowercase : Union[str, Any] =tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.tar'''
with tarfile.TarFile(SCREAMING_SNAKE_CASE_ , '''w''' ) as f:
f.add(SCREAMING_SNAKE_CASE_ , arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ) )
f.add(SCREAMING_SNAKE_CASE_ , arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ) )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : Any , __magic_name__ : int , __magic_name__ : int ) -> List[Any]:
lowercase : Optional[Any] =tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.tar'''
with tarfile.TarFile(SCREAMING_SNAKE_CASE_ , '''w''' ) as f:
f.add(SCREAMING_SNAKE_CASE_ , arcname=os.path.join('''nested''' , os.path.basename(SCREAMING_SNAKE_CASE_ ) ) )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : int ) -> str:
lowercase : Optional[int] =['''0''', '''1''', '''2''', '''3''']
lowercase : Optional[int] =str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt''' )
with open(SCREAMING_SNAKE_CASE_ , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : Any ) -> List[Any]:
lowercase : Union[str, Any] =['''0''', '''1''', '''2''', '''3''']
lowercase : Tuple =str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.txt''' )
with open(SCREAMING_SNAKE_CASE_ , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : Any ) -> Any:
lowercase : Optional[Any] =['''0''', '''1''', '''2''', '''3''']
lowercase : int =tmp_path_factory.mktemp('''data''' ) / '''dataset.abc'''
with open(SCREAMING_SNAKE_CASE_ , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : Union[str, Any] , __magic_name__ : Tuple , __magic_name__ : Optional[int] ) -> Any:
lowercase : Tuple =tmp_path_factory.mktemp('''data''' ) / '''dataset.text.zip'''
with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ) )
f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ) )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] ) -> int:
lowercase : List[Any] =tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.text.zip'''
with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.join('''main_dir''' , os.path.basename(SCREAMING_SNAKE_CASE_ ) ) )
f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.join('''main_dir''' , os.path.basename(SCREAMING_SNAKE_CASE_ ) ) )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : Tuple , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ) -> int:
lowercase : Optional[int] =tmp_path_factory.mktemp('''data''' ) / '''dataset.ext.zip'''
with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.basename('''unsupported.ext''' ) )
f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.basename('''unsupported_2.ext''' ) )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : str ) -> List[Any]:
lowercase : str ='''\n'''.join(['''First''', '''Second\u2029with Unicode new line''', '''Third'''] )
lowercase : str =str(tmp_path_factory.mktemp('''data''' ) / '''dataset_with_unicode_new_lines.txt''' )
with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( ) -> Dict:
return os.path.join('''tests''' , '''features''' , '''data''' , '''test_image_rgb.jpg''' )
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( ) -> List[str]:
return os.path.join('''tests''' , '''features''' , '''data''' , '''test_audio_44100.wav''' )
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : int ) -> Dict:
lowercase : int =tmp_path_factory.mktemp('''data''' ) / '''dataset.img.zip'''
with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ) )
f.write(SCREAMING_SNAKE_CASE_ , arcname=os.path.basename(SCREAMING_SNAKE_CASE_ ).replace('''.jpg''' , '''2.jpg''' ) )
return path
@pytest.fixture(scope='''session''' )
def _lowerCAmelCase ( __magic_name__ : Tuple ) -> str:
lowercase : int =tmp_path_factory.mktemp('''data_dir''' )
(data_dir / "subdir").mkdir()
with open(data_dir / '''subdir''' / '''train.txt''' , '''w''' ) as f:
f.write('''foo\n''' * 10 )
with open(data_dir / '''subdir''' / '''test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 10 )
# hidden file
with open(data_dir / '''subdir''' / '''.test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '''.subdir''' / '''train.txt''' , '''w''' ) as f:
f.write('''foo\n''' * 10 )
with open(data_dir / '''.subdir''' / '''test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 10 )
return data_dir
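# Editor's sketch (hedged): how a test module consumes session-scoped fixtures
# like the ones above. pytest injects fixtures by argument name; since the
# names in this dump are obfuscated, '''text_path''' below is a hypothetical
# stand-in for the plain-text dataset fixture.
@pytest.fixture(scope='''session''')
def text_path(tmp_path_factory):
    path = tmp_path_factory.mktemp('''demo''') / '''dataset.txt'''
    path.write_text('''0\n1\n2\n3\n''')
    return str(path)
def test_reads_fixture(text_path):
    with open(text_path) as f:
        assert f.read().splitlines() == ['''0''', '''1''', '''2''', '''3''']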
| 715 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Optional[Any]=None ):
'''simple docstring'''
# Input as list
lowercase : Optional[int] =list(poly_a or [0] )[:]
lowercase : Optional[Any] =list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
lowercase : Any =len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
lowercase : Dict =len(self.polyB )
# Add 0 to make lengths equal a power of 2
lowercase : int =int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root of unity used for the Fourier transform
lowercase : Union[str, Any] =complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
lowercase : Tuple =self.__multiply()
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Tuple ):
'''simple docstring'''
lowercase : Union[str, Any] =[[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB]
# Corner case
if len(UpperCAmelCase__ ) <= 1:
return dft[0]
# Bottom-up transform: the column count halves on every pass
lowercase : Any =self.c_max_length // 2
while next_ncol > 0:
lowercase : Optional[int] =[[] for i in range(UpperCAmelCase__ )]
lowercase : Tuple =self.root**next_ncol
# First half of next step
lowercase : str =1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCAmelCase__ ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
lowercase : int =1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCAmelCase__ ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
lowercase : Dict =new_dft
lowercase : Tuple =next_ncol // 2
return dft[0]
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Any =self.__dft('''A''' )
lowercase : Any =self.__dft('''B''' )
lowercase : Optional[int] =[[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverse_c[0] ) <= 1:
return inverse_c[0]
# Inverse DFT
lowercase : Optional[int] =2
while next_ncol <= self.c_max_length:
lowercase : Optional[int] =[[] for i in range(UpperCAmelCase__ )]
lowercase : List[str] =self.root ** (next_ncol // 2)
lowercase : Optional[int] =1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverse_c[i][j]
+ inverse_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverse_c[i][j]
- inverse_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
lowercase : List[Any] =new_inverse_c
next_ncol *= 2
# Unpack
lowercase : Tuple =[round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverse_c]
# Remove leading 0's
while inverse_c[-1] == 0:
inverse_c.pop()
return inverse_c
def __str__( self : Any ):
'''simple docstring'''
lowercase : Any ='''A = ''' + ''' + '''.join(
F'''{coef}*x^{i}''' for i, coef in enumerate(self.polyA[: self.len_A] ) )
lowercase : Tuple ='''B = ''' + ''' + '''.join(
F'''{coef}*x^{i}''' for i, coef in enumerate(self.polyB[: self.len_B] ) )
lowercase : List[str] ='''A*B = ''' + ''' + '''.join(
F'''{coef}*x^{i}''' for i, coef in enumerate(self.product ) )
return F'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
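# Editor's sketch (hedged): a self-contained cross-check of the class above
# using numpy's built-in FFT. Both coefficient lists are padded to the next
# power of two >= len(a) + len(b) - 1, multiplied pointwise in the frequency
# domain, and inverse-transformed back to coefficients.
def multiply_polynomials_via_numpy(poly_a, poly_b):
    n = 1 << int(np.ceil(np.log2(len(poly_a) + len(poly_b) - 1)))
    fa = np.fft.fft(poly_a, n)  # zero-pads the input to length n
    fb = np.fft.fft(poly_b, n)
    coeffs = np.fft.ifft(fa * fb).real.round(8)
    return coeffs[: len(poly_a) + len(poly_b) - 1].tolist()
# (1 + 2x + 3x^2) * (3 + 4x) = 3 + 10x + 17x^2 + 12x^3
assert multiply_polynomials_via_numpy([1, 2, 3], [3, 4]) == [3.0, 10.0, 17.0, 12.0]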
| 88 | 0 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : List[Any] =AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowercase : Optional[Any] =AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A_ )
lowercase : Union[str, Any] =-1
lowercase : Any =ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A_ )
lowercase : Dict =model.generate(A_ , max_new_tokens=10 , do_sample=A_ )
lowercase : List[Any] =tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowercase : List[str] =TextStreamer(A_ )
model.generate(A_ , max_new_tokens=10 , do_sample=A_ , streamer=A_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowercase : Any =cs.out[:-1]
self.assertEqual(A_ , A_ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : List[Any] =AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowercase : int =AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A_ )
lowercase : Optional[int] =-1
lowercase : int =ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A_ )
lowercase : Optional[int] =model.generate(A_ , max_new_tokens=10 , do_sample=A_ )
lowercase : str =tokenizer.decode(greedy_ids[0] )
lowercase : int =TextIteratorStreamer(A_ )
lowercase : str ={"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
lowercase : List[Any] =Thread(target=model.generate , kwargs=A_ )
thread.start()
lowercase : int =""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(A_ , A_ )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Optional[int] =AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowercase : Dict =AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A_ )
lowercase : List[str] =-1
lowercase : List[str] =ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A_ )
lowercase : Optional[Any] =model.generate(A_ , max_new_tokens=10 , do_sample=A_ )
lowercase : Any =greedy_ids[:, input_ids.shape[1] :]
lowercase : Union[str, Any] =tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowercase : int =TextStreamer(A_ , skip_prompt=A_ )
model.generate(A_ , max_new_tokens=10 , do_sample=A_ , streamer=A_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowercase : Any =cs.out[:-1]
self.assertEqual(A_ , A_ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : Union[str, Any] =AutoTokenizer.from_pretrained('''distilgpt2''' )
lowercase : Any =AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(A_ )
lowercase : Union[str, Any] =-1
lowercase : Tuple =torch.ones((1, 5) , device=A_ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowercase : List[str] =TextStreamer(A_ , skip_special_tokens=A_ )
model.generate(A_ , max_new_tokens=1 , do_sample=A_ , streamer=A_ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowercase : List[str] =cs.out[:-1] # Remove the final "\n"
lowercase : int =tokenizer(A_ , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Any =AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowercase : Dict =AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A_ )
lowercase : str =-1
lowercase : int =ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A_ )
lowercase : List[Any] =TextIteratorStreamer(A_ , timeout=0.001 )
lowercase : Tuple ={"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
lowercase : Any =Thread(target=model.generate , kwargs=A_ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(A_ ):
lowercase : Tuple =""
for new_text in streamer:
streamer_text += new_text
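# Editor's sketch (hedged): the non-test pattern the cases above exercise -
# TextIteratorStreamer is drained on the main thread while generate() runs on
# a worker thread. Model name and prompt below are illustrative.
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
tok = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''')
lm = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''')
inputs = tok('''An example prompt''', return_tensors='''pt''')
streamer = TextIteratorStreamer(tok, skip_prompt=True)
Thread(target=lm.generate, kwargs={**inputs, '''max_new_tokens''': 10, '''streamer''': streamer}).start()
text = "".join(streamer)  # blocks until generation finishes, collecting chunks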
| 716 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {"""configuration_plbart""": ["""PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PLBartConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""PLBartTokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""PLBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PLBartForCausalLM""",
"""PLBartForConditionalGeneration""",
"""PLBartForSequenceClassification""",
"""PLBartModel""",
"""PLBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
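# Editor's sketch (hedged): the mechanism _LazyModule implements, reduced to
# its core - a module whose __getattr__ imports the owning submodule only on
# first attribute access and caches the result. This is a toy restatement for
# illustration, not transformers' actual implementation.
import importlib
import types
class _LazyDemo(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                value = getattr(importlib.import_module(submodule), attr)
                setattr(self, attr, value)  # cache so __getattr__ is not hit again
                return value
        raise AttributeError(attr)
demo = _LazyDemo('''demo''', {'''json''': ['''dumps''']})
print(demo.dumps({'''a''': 1}))  # the json module is imported only at this point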
| 88 | 0 |
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
UpperCamelCase_ = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 48000,
'''sample_size''': 65536,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 48000,
'''sample_size''': 65536,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 48000,
'''sample_size''': 131072,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 16000,
'''sample_size''': 65536,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 16000,
'''sample_size''': 65536,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 16000,
'''sample_size''': 65536,
},
}
def _lowerCAmelCase ( __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] ) -> List[Any]:
return torch.atana(__magic_name__ , __magic_name__ ) / math.pi * 2
def _lowerCAmelCase ( __magic_name__ : List[str] ) -> Any:
lowercase : Tuple =torch.sin(t * math.pi / 2 ) ** 2
lowercase : str =(1 - sigma**2) ** 0.5
return alpha_sigma_to_t(__magic_name__ , __magic_name__ )
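# Editor's sketch (hedged): the two helpers above are alpha_sigma_to_t and the
# "crash" schedule (get_crash_schedule) from the original conversion script;
# the names here are obfuscated, so they are restated below for a spot check.
def _alpha_sigma_to_t_demo(alpha, sigma):
    return torch.atan2(sigma, alpha) / math.pi * 2
def _get_crash_schedule_demo(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return _alpha_sigma_to_t_demo(alpha, sigma)
print(_get_crash_schedule_demo(torch.tensor(0.0)))  # ~0.0: all signal, no noise
print(_get_crash_schedule_demo(torch.tensor(1.0)))  # ~1.0: all noise, no signal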
class __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ):
pass
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : List[Any] , UpperCAmelCase__ : Any ):
'''simple docstring'''
super().__init__()
lowercase : str =DiffusionAttnUnetaD(UpperCAmelCase__ , n_attn_layers=4 )
lowercase : int =deepcopy(self.diffusion )
lowercase : List[Any] =torch.quasirandom.SobolEngine(1 , scramble=UpperCAmelCase__ )
def _lowerCAmelCase ( __magic_name__ : Any ) -> Optional[int]:
lowercase : int =MODELS_MAP[model_name]['''url''']
os.system(f'''wget {url} ./''' )
return f'''./{model_name}.ckpt'''
UpperCamelCase_ = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
UpperCamelCase_ = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
UpperCamelCase_ = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
UpperCamelCase_ = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
UpperCamelCase_ = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
UpperCamelCase_ = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def _lowerCAmelCase ( __magic_name__ : Optional[Any] ) -> Tuple:
if name.startswith('''skip''' ):
return name.replace('''skip''' , RES_CONV_MAP['''skip'''] )
# name has to be of format main.{digit}
if not name.startswith('''main.''' ):
raise ValueError(f'''ResConvBlock error with {name}''' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def _lowerCAmelCase ( __magic_name__ : str ) -> Optional[Any]:
for key, value in ATTN_MAP.items():
if name.startswith(__magic_name__ ) and not isinstance(__magic_name__ , __magic_name__ ):
return name.replace(__magic_name__ , __magic_name__ )
elif name.startswith(__magic_name__ ):
return [name.replace(__magic_name__ , __magic_name__ ) for v in value]
raise ValueError(f'''Attn error with {name}''' )
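# Walks a dotted checkpoint name, tracking nesting depth through the 'net.'/'main.' prefixes,
# and emits the equivalent diffusers parameter name for the down/mid/up block it belongs to.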
def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : Optional[int]=13 ) -> Optional[int]:
lowercase : List[Any] =input_string
if string.split('''.''' )[0] == "timestep_embed":
return string.replace('''timestep_embed''' , '''time_proj''' )
lowercase : int =0
if string.startswith('''net.3.''' ):
depth += 1
lowercase : Tuple =string[6:]
elif string.startswith('''net.''' ):
lowercase : Dict =string[4:]
while string.startswith('''main.7.''' ):
depth += 1
lowercase : Optional[Any] =string[7:]
if string.startswith('''main.''' ):
lowercase : Dict =string[5:]
# mid block
if string[:2].isdigit():
lowercase : Optional[int] =string[:2]
lowercase : Optional[int] =string[2:]
else:
lowercase : str =string[0]
lowercase : str =string[1:]
if depth == max_depth:
lowercase : Optional[int] =MID_NUM_TO_LAYER[layer_num]
lowercase : Optional[int] ='''mid_block'''
elif depth > 0 and int(__magic_name__ ) < 7:
lowercase : str =DOWN_NUM_TO_LAYER[layer_num]
lowercase : List[str] =f'''down_blocks.{depth}'''
elif depth > 0 and int(__magic_name__ ) > 7:
lowercase : List[Any] =UP_NUM_TO_LAYER[layer_num]
lowercase : Dict =f'''up_blocks.{max_depth - depth - 1}'''
elif depth == 0:
lowercase : Tuple =DEPTH_0_TO_LAYER[layer_num]
lowercase : List[str] =f'''up_blocks.{max_depth - 1}''' if int(__magic_name__ ) > 3 else '''down_blocks.0'''
if not string_left.startswith('''.''' ):
raise ValueError(f'''Naming error with {input_string} and string_left: {string_left}.''' )
lowercase : Tuple =string_left[1:]
if "resnets" in new_layer:
lowercase : List[str] =convert_resconv_naming(__magic_name__ )
elif "attentions" in new_layer:
lowercase : Union[str, Any] =convert_attn_naming(__magic_name__ )
lowercase : Optional[int] =new_string_left
if not isinstance(__magic_name__ , __magic_name__ ):
lowercase : Optional[Any] =prefix + '''.''' + new_layer + '''.''' + string_left
else:
lowercase : Union[str, Any] =[prefix + '''.''' + new_layer + '''.''' + s for s in string_left]
return new_string
def _lowerCAmelCase ( __magic_name__ : List[Any] ) -> Optional[Any]:
lowercase : Optional[Any] ={}
for k, v in state_dict.items():
if k.endswith('''kernel''' ):
# up- and downsample layers don't have trainable weights
continue
lowercase : Dict =rename(__magic_name__ )
# check if we need to transform from Conv => Linear for attention
if isinstance(__magic_name__ , __magic_name__ ):
lowercase : int =transform_conv_attns(__magic_name__ , __magic_name__ , __magic_name__ )
else:
lowercase : Optional[Any] =v
return new_state_dict
def _lowerCAmelCase ( __magic_name__ : Dict , __magic_name__ : int , __magic_name__ : int ) -> List[Any]:
if len(__magic_name__ ) == 1:
if len(v.shape ) == 3:
# weight
lowercase : List[Any] =v[:, :, 0]
else:
# bias
lowercase : Optional[Any] =v
else:
# qkv matrices
lowercase : Dict =v.shape[0]
lowercase : str =trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
lowercase : List[str] =v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
lowercase : Optional[int] =v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
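# End-to-end conversion: load the original checkpoint, rename its weights into the diffusers
# layout, then generate audio with both the original sampler and the new pipeline and compare.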
def _lowerCAmelCase ( __magic_name__ : Dict ) -> str:
lowercase : List[str] =torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
lowercase : str =args.model_path.split('''/''' )[-1].split('''.''' )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), f'''Make sure to provide one of the official model names {MODELS_MAP.keys()}'''
lowercase : int =download(__magic_name__ )
lowercase : Optional[Any] =MODELS_MAP[model_name]['''sample_rate''']
lowercase : List[Any] =MODELS_MAP[model_name]['''sample_size''']
lowercase : Union[str, Any] =Object()
lowercase : Tuple =sample_size
lowercase : Tuple =sample_rate
lowercase : Optional[int] =0
lowercase : List[Any] =UNetaDModel(sample_size=__magic_name__ , sample_rate=__magic_name__ )
lowercase : Optional[Any] =diffusers_model.state_dict()
lowercase : Tuple =DiffusionUncond(__magic_name__ )
orig_model.load_state_dict(torch.load(args.model_path , map_location=__magic_name__ )['''state_dict'''] )
lowercase : Tuple =orig_model.diffusion_ema.eval()
lowercase : str =orig_model.state_dict()
lowercase : str =rename_orig_weights(__magic_name__ )
lowercase : Dict =set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
lowercase : Optional[Any] =set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(__magic_name__ ) == 0, f'''Problem with {renamed_minus_diffusers}'''
assert all(k.endswith('''kernel''' ) for k in list(__magic_name__ ) ), f'''Problem with {diffusers_minus_renamed}'''
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), f'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'''
if key == "time_proj.weight":
lowercase : Any =value.squeeze()
lowercase : Dict =value
diffusers_model.load_state_dict(__magic_name__ )
lowercase : Any =100
lowercase : Optional[Any] =33
lowercase : List[str] =IPNDMScheduler(num_train_timesteps=__magic_name__ )
lowercase : Tuple =torch.manual_seed(__magic_name__ )
lowercase : Tuple =torch.randn([1, 2, config.sample_size] , generator=__magic_name__ ).to(__magic_name__ )
lowercase : Union[str, Any] =torch.linspace(1 , 0 , steps + 1 , device=__magic_name__ )[:-1]
lowercase : int =get_crash_schedule(__magic_name__ )
lowercase : Any =DanceDiffusionPipeline(unet=__magic_name__ , scheduler=__magic_name__ )
lowercase : Optional[int] =torch.manual_seed(33 )
lowercase : int =pipe(num_inference_steps=__magic_name__ , generator=__magic_name__ ).audios
lowercase : int =sampling.iplms_sample(__magic_name__ , __magic_name__ , __magic_name__ , {} )
lowercase : str =generated.clamp(-1 , 1 )
lowercase : List[str] =(generated - audio).abs().sum()
lowercase : Optional[Any] =(generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print('''Diff sum''' , __magic_name__ )
print('''Diff max''' , __magic_name__ )
assert diff_max < 1E-3, f'''Diff max: {diff_max} is too much :-/'''
print(f'''Conversion for {model_name} successful!''' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
UpperCamelCase_ = parser.parse_args()
main(args)
| 717 |
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCamelCase_ = logging.get_logger(__name__)
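# Composite configuration: wraps arbitrary vision-encoder and text-decoder sub-configs,
# each reconstructed from its serialized dict via AutoConfig.for_model.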
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = 'vision-encoder-decoder'
lowerCamelCase_ = True
def __init__( self : Optional[int] , **UpperCAmelCase__ : Tuple ):
'''simple docstring'''
super().__init__(**UpperCAmelCase__ )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
F'''A configuration of type {self.model_type} cannot be instantiated because '''
F'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' )
lowercase : Optional[Any] =kwargs.pop('''encoder''' )
lowercase : List[Any] =encoder_config.pop('''model_type''' )
lowercase : List[str] =kwargs.pop('''decoder''' )
lowercase : Dict =decoder_config.pop('''model_type''' )
lowercase : Union[str, Any] =AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase : List[str] =AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase : str =True
@classmethod
def lowerCamelCase_ ( cls : List[str] , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : PretrainedConfig , **UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
logger.info('''Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
lowercase : int =True
lowercase : Optional[Any] =True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : int =copy.deepcopy(self.__dict__ )
lowercase : Union[str, Any] =self.encoder.to_dict()
lowercase : Union[str, Any] =self.decoder.to_dict()
lowercase : int =self.__class__.model_type
return output
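# ONNX export configs: the encoder consumes pixel_values, the decoder consumes token ids plus
# the encoder's hidden states; the wrapper class below dispatches to the appropriate one.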
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = version.parse('1.11' )
@property
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return 1E-4
@property
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
return OrderedDict({'''last_hidden_state''': {0: '''batch''', 1: '''encoder_sequence'''}} )
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
@property
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : List[str] =OrderedDict()
lowercase : Tuple ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
lowercase : Optional[int] ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
lowercase : int ={0: '''batch''', 1: '''encoder_sequence'''}
return common_inputs
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : "PreTrainedTokenizerBase" , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional["TensorType"] = None , ):
'''simple docstring'''
import torch
lowercase : Optional[Any] =OrderedDict()
lowercase : List[Any] =super().generate_dummy_inputs(
UpperCAmelCase__ , batch_size=UpperCAmelCase__ , seq_length=UpperCAmelCase__ , is_pair=UpperCAmelCase__ , framework=UpperCAmelCase__ )
lowercase , lowercase : Optional[int] =dummy_input['''input_ids'''].shape
lowercase : Union[str, Any] =(batch, encoder_sequence, self._config.encoder_hidden_size)
lowercase : List[str] =dummy_input.pop('''input_ids''' )
lowercase : Tuple =dummy_input.pop('''attention_mask''' )
lowercase : Union[str, Any] =torch.zeros(UpperCAmelCase__ )
return common_inputs
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
@property
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : PretrainedConfig ):
'''simple docstring'''
return VisionEncoderDecoderEncoderOnnxConfig(UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : str = "default" ):
'''simple docstring'''
lowercase : List[Any] =encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(UpperCAmelCase__ , UpperCAmelCase__ )
| 88 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = ["""model.decoder.embed_positions.weights"""]
def _lowerCAmelCase ( __magic_name__ : Union[str, Any] ) -> List[str]:
if "emb" in name:
lowercase : List[str] =name.replace('''emb''' , '''model.decoder.embed_tokens''' )
if "transformer" in name:
lowercase : List[str] =name.replace('''transformer''' , '''model.decoder''' )
if "cross_attention" in name:
lowercase : Union[str, Any] =name.replace('''cross_attention''' , '''encoder_attn''' )
if "linear1" in name:
lowercase : Tuple =name.replace('''linear1''' , '''fc1''' )
if "linear2" in name:
lowercase : Optional[Any] =name.replace('''linear2''' , '''fc2''' )
if "norm1" in name:
lowercase : List[Any] =name.replace('''norm1''' , '''self_attn_layer_norm''' )
if "norm_cross" in name:
lowercase : int =name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' )
if "norm2" in name:
lowercase : Union[str, Any] =name.replace('''norm2''' , '''final_layer_norm''' )
if "out_norm" in name:
lowercase : Optional[int] =name.replace('''out_norm''' , '''model.decoder.layer_norm''' )
if "linears" in name:
lowercase : Any =name.replace('''linears''' , '''lm_heads''' )
if "condition_provider.conditioners.description.output_proj" in name:
lowercase : Any =name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' )
return name
def _lowerCAmelCase ( __magic_name__ : Optional[int] , __magic_name__ : Any ) -> Dict:
lowercase : str =list(state_dict.keys() )
lowercase : Tuple ={}
for key in keys:
lowercase : List[Any] =state_dict.pop(__lowerCAmelCase )
lowercase : Tuple =rename_keys(__lowerCAmelCase )
if "in_proj_weight" in key:
# split fused qkv proj
lowercase : Any =val[:hidden_size, :]
lowercase : Union[str, Any] =val[hidden_size : 2 * hidden_size, :]
lowercase : Optional[int] =val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
lowercase : Optional[Any] =val
else:
lowercase : Dict =val
return state_dict, enc_dec_proj_state_dict
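# Decoder hyperparameters (hidden size, layer count, attention heads) for the three released
# checkpoint sizes; anything else is rejected.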
def _lowerCAmelCase ( __magic_name__ : Dict ) -> Optional[int]:
if checkpoint == "small":
# default config values
lowercase : Any =1024
lowercase : Tuple =24
lowercase : Union[str, Any] =16
elif checkpoint == "medium":
lowercase : List[Any] =1536
lowercase : str =48
lowercase : str =24
elif checkpoint == "large":
lowercase : Dict =2048
lowercase : Optional[int] =48
lowercase : Any =32
else:
raise ValueError(f'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
lowercase : Optional[int] =MusicgenDecoderConfig(
hidden_size=__lowerCAmelCase , ffn_dim=hidden_size * 4 , num_hidden_layers=__lowerCAmelCase , num_attention_heads=__lowerCAmelCase , )
return config
@torch.no_grad()
def _lowerCAmelCase ( __magic_name__ : Union[str, Any] , __magic_name__ : List[str]=None , __magic_name__ : List[Any]=None , __magic_name__ : Dict="cpu" ) -> Optional[Any]:
lowercase : str =MusicGen.get_pretrained(__lowerCAmelCase , device=__lowerCAmelCase )
lowercase : Union[str, Any] =decoder_config_from_checkpoint(__lowerCAmelCase )
lowercase : List[Any] =fairseq_model.lm.state_dict()
lowercase : Any =rename_state_dict(
__lowerCAmelCase , hidden_size=decoder_config.hidden_size )
lowercase : Tuple =TaEncoderModel.from_pretrained('''t5-base''' )
lowercase : int =EncodecModel.from_pretrained('''facebook/encodec_32khz''' )
lowercase : Tuple =MusicgenForCausalLM(__lowerCAmelCase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
lowercase : Any =decoder.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
for key in missing_keys.copy():
if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
raise ValueError(f'''Missing key(s) in state_dict: {missing_keys}''' )
if len(__lowerCAmelCase ) > 0:
raise ValueError(f'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
lowercase : Tuple =MusicgenForConditionalGeneration(text_encoder=__lowerCAmelCase , audio_encoder=__lowerCAmelCase , decoder=__lowerCAmelCase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(__lowerCAmelCase )
# check we can do a forward pass
lowercase : Any =torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
lowercase : Tuple =input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
lowercase : Any =model(input_ids=__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase ).logits
if logits.shape != (8, 1, 2048):
raise ValueError('''Incorrect shape for logits''' )
# now construct the processor
lowercase : Optional[int] =AutoTokenizer.from_pretrained('''t5-base''' )
lowercase : List[str] =AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' )
lowercase : Optional[int] =MusicgenProcessor(feature_extractor=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
# set the appropriate bos/pad token ids
lowercase : Dict =2048
lowercase : List[str] =2048
# set other default generation config params
lowercase : Dict =int(30 * audio_encoder.config.frame_rate )
lowercase : Any =True
lowercase : Dict =3.0
if pytorch_dump_folder is not None:
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
logger.info(f'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
if repo_id:
logger.info(f'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(__lowerCAmelCase )
processor.push_to_hub(__lowerCAmelCase )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
UpperCamelCase_ = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 718 |
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
UpperCamelCase_ = logging.getLogger(__name__)
UpperCamelCase_ = tf.data.AUTOTUNE
def _lowerCAmelCase ( ) -> Any:
lowercase : Dict =argparse.ArgumentParser(description='''Train a masked language model on TPU.''' )
parser.add_argument(
'''--pretrained_model_config''' , type=__magic_name__ , default='''roberta-base''' , help='''The model config to use. Note that we don\'t copy the model\'s weights, only the config!''' , )
parser.add_argument(
'''--tokenizer''' , type=__magic_name__ , default='''unigram-tokenizer-wikitext''' , help='''The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.''' , )
parser.add_argument(
'''--per_replica_batch_size''' , type=__magic_name__ , default=8 , help='''Batch size per TPU core.''' , )
parser.add_argument(
'''--no_tpu''' , action='''store_true''' , help='''If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.''' , )
parser.add_argument(
'''--tpu_name''' , type=__magic_name__ , help='''Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.''' , default='''local''' , )
parser.add_argument(
'''--tpu_zone''' , type=__magic_name__ , help='''Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.''' , )
parser.add_argument(
'''--gcp_project''' , type=__magic_name__ , help='''Google cloud project name. Only used for non-Colab TPU nodes.''' )
parser.add_argument(
'''--bfloat16''' , action='''store_true''' , help='''Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.''' , )
parser.add_argument(
'''--train_dataset''' , type=__magic_name__ , help='''Path to training dataset to load. If the path begins with `gs://`'''
''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , )
parser.add_argument(
'''--shuffle_buffer_size''' , type=__magic_name__ , default=2**18 , help='''Size of the shuffle buffer (in samples)''' , )
parser.add_argument(
'''--eval_dataset''' , type=__magic_name__ , help='''Path to evaluation dataset to load. If the path begins with `gs://`'''
''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , )
parser.add_argument(
'''--num_epochs''' , type=__magic_name__ , default=1 , help='''Number of epochs to train for.''' , )
parser.add_argument(
'''--learning_rate''' , type=__magic_name__ , default=1E-4 , help='''Learning rate to use for training.''' , )
parser.add_argument(
'''--weight_decay_rate''' , type=__magic_name__ , default=1E-3 , help='''Weight decay rate to use for training.''' , )
parser.add_argument(
'''--max_length''' , type=__magic_name__ , default=512 , help='''Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py''' , )
parser.add_argument(
'''--mlm_probability''' , type=__magic_name__ , default=0.1_5 , help='''Fraction of tokens to mask during training.''' , )
parser.add_argument('''--output_dir''' , type=__magic_name__ , required=__magic_name__ , help='''Path to save model checkpoints to.''' )
parser.add_argument('''--hub_model_id''' , type=__magic_name__ , help='''Model ID to upload to on the Hugging Face Hub.''' )
lowercase : Union[str, Any] =parser.parse_args()
return args
def _lowerCAmelCase ( __magic_name__ : List[str] ) -> List[Any]:
try:
if args.tpu_name:
lowercase : Dict =tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
lowercase : Optional[int] =tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
'''Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or '''
'''--gcp_project. When running on a TPU VM, use --tpu_name local.''' )
tf.config.experimental_connect_to_cluster(__magic_name__ )
tf.tpu.experimental.initialize_tpu_system(__magic_name__ )
return tpu
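# TFRecord shards encode their sample count in the filename (...-{num_samples}.tfrecord),
# so the dataset size can be computed without reading the records.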
def _lowerCAmelCase ( __magic_name__ : Tuple ) -> Union[str, Any]:
lowercase : str =0
for file in file_list:
lowercase : List[str] =file.split('''/''' )[-1]
lowercase : Union[str, Any] =re.search(R'''-\d+-(\d+)\.tfrecord''' , __magic_name__ ).group(1 )
lowercase : int =int(__magic_name__ )
num_samples += sample_count
return num_samples
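# Builds the tf.data pipeline: shard-level shuffling, TFRecord parsing, sample-level shuffling,
# batching and on-the-fly MLM masking, all overlapped with prefetching.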
def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int]=None ) -> str:
lowercase : int =count_samples(__magic_name__ )
lowercase : Union[str, Any] =tf.data.Dataset.from_tensor_slices(__magic_name__ )
if shuffle:
lowercase : Union[str, Any] =dataset.shuffle(len(__magic_name__ ) )
lowercase : Any =tf.data.TFRecordDataset(__magic_name__ , num_parallel_reads=__magic_name__ )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
lowercase : Optional[int] =dataset.apply(tf.data.experimental.assert_cardinality(__magic_name__ ) )
lowercase : str =dataset.map(__magic_name__ , num_parallel_calls=__magic_name__ )
if shuffle:
assert shuffle_buffer_size is not None
lowercase : int =dataset.shuffle(args.shuffle_buffer_size )
lowercase : Optional[int] =dataset.batch(__magic_name__ , drop_remainder=__magic_name__ )
lowercase : int =dataset.map(__magic_name__ , num_parallel_calls=__magic_name__ )
lowercase : Union[str, Any] =dataset.prefetch(__magic_name__ )
return dataset
def _lowerCAmelCase ( __magic_name__ : Any ) -> str:
if not args.no_tpu:
lowercase : Optional[Any] =initialize_tpu(__magic_name__ )
lowercase : Any =tf.distribute.TPUStrategy(__magic_name__ )
else:
lowercase : Optional[Any] =tf.distribute.OneDeviceStrategy(device='''/gpu:0''' )
if args.bfloataa:
tf.keras.mixed_precision.set_global_policy('''mixed_bfloat16''' )
lowercase : Any =AutoTokenizer.from_pretrained(args.tokenizer )
lowercase : Union[str, Any] =AutoConfig.from_pretrained(args.pretrained_model_config )
lowercase : Optional[Any] =tokenizer.vocab_size
lowercase : str =tf.io.gfile.glob(os.path.join(args.train_dataset , '''*.tfrecord''' ) )
if not training_records:
raise ValueError(f'''No .tfrecord files found in {args.train_dataset}.''' )
lowercase : Optional[int] =tf.io.gfile.glob(os.path.join(args.eval_dataset , '''*.tfrecord''' ) )
if not eval_records:
raise ValueError(f'''No .tfrecord files found in {args.eval_dataset}.''' )
lowercase : Any =count_samples(__magic_name__ )
lowercase : str =num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
lowercase : Union[str, Any] =steps_per_epoch * args.num_epochs
with strategy.scope():
lowercase : List[Any] =TFAutoModelForMaskedLM.from_config(__magic_name__ )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
lowercase , lowercase : Dict =create_optimizer(
num_train_steps=__magic_name__ , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=__magic_name__ , metrics=['''accuracy'''] )
def decode_fn(__magic_name__ : Optional[Any] ):
lowercase : Union[str, Any] ={
'''input_ids''': tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
'''attention_mask''': tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(__magic_name__ , __magic_name__ )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
lowercase : str =DataCollatorForLanguageModeling(
tokenizer=__magic_name__ , mlm_probability=args.mlm_probability , mlm=__magic_name__ , return_tensors='''tf''' )
def mask_with_collator(__magic_name__ : Dict ):
# TF really needs an isin() function
lowercase : int =(
~tf.cast(batch['''attention_mask'''] , tf.bool )
| (batch['''input_ids'''] == tokenizer.cls_token_id)
| (batch['''input_ids'''] == tokenizer.sep_token_id)
)
lowercase , lowercase : Union[str, Any] =data_collator.tf_mask_tokens(
batch['''input_ids'''] , vocab_size=len(__magic_name__ ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=__magic_name__ , )
return batch
lowercase : List[str] =args.per_replica_batch_size * strategy.num_replicas_in_sync
lowercase : Dict =prepare_dataset(
__magic_name__ , decode_fn=__magic_name__ , mask_fn=__magic_name__ , batch_size=__magic_name__ , shuffle=__magic_name__ , shuffle_buffer_size=args.shuffle_buffer_size , )
lowercase : Union[str, Any] =prepare_dataset(
__magic_name__ , decode_fn=__magic_name__ , mask_fn=__magic_name__ , batch_size=__magic_name__ , shuffle=__magic_name__ , )
lowercase : Tuple =[]
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=__magic_name__ ) )
model.fit(
__magic_name__ , validation_data=__magic_name__ , epochs=args.num_epochs , callbacks=__magic_name__ , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
UpperCamelCase_ = parse_args()
main(args)
| 88 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( __a ):
def __init__( self : List[str] , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
warnings.warn(
'''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use YolosImageProcessor instead.''' , lowerCAmelCase_ , )
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
| 719 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
UpperCamelCase_ = """3"""
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 88 | 0 |
'''simple docstring'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
UpperCamelCase_ = '\nimport os\n'
UpperCamelCase_ = '\ndef foo():\n import os\n return False\n'
UpperCamelCase_ = '\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n'
UpperCamelCase_ = '\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n'
UpperCamelCase_ = '\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n'
UpperCamelCase_ = '\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n'
UpperCamelCase_ = '\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n'
UpperCamelCase_ = '\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n'
UpperCamelCase_ = '\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n'
UpperCamelCase_ = '\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n'
UpperCamelCase_ = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
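# Every snippet above imports `os` unconditionally and wraps any other import in try/except,
# so get_imports should report `os` as the only hard dependency.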
@pytest.mark.parametrize('''case''' , UpperCamelCase__ )
def _lowerCAmelCase ( __magic_name__ : Dict , __magic_name__ : Tuple ) -> Tuple:
lowercase : Dict =os.path.join(UpperCamelCase__ , '''test_file.py''' )
with open(UpperCamelCase__ , '''w''' ) as _tmp_file:
_tmp_file.write(UpperCamelCase__ )
lowercase : Optional[Any] =get_imports(UpperCamelCase__ )
assert parsed_imports == ["os"]
| 720 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = """▁"""
UpperCamelCase_ = {"""vocab_file""": """sentencepiece.bpe.model"""}
UpperCamelCase_ = {
"""vocab_file""": {
"""facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""",
}
}
UpperCamelCase_ = {
"""facebook/xglm-564M""": 2048,
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = VOCAB_FILES_NAMES
lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any]="<s>" , UpperCAmelCase__ : int="</s>" , UpperCAmelCase__ : Optional[Any]="</s>" , UpperCAmelCase__ : Optional[Any]="<s>" , UpperCAmelCase__ : Any="<unk>" , UpperCAmelCase__ : Any="<pad>" , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , **UpperCAmelCase__ : Optional[int] , ):
'''simple docstring'''
lowercase : int ={} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
lowercase : Optional[Any] =7
lowercase : Optional[int] =[F'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
lowercase : List[Any] =kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , )
lowercase : List[str] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCAmelCase__ ) )
lowercase : List[Any] =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase : Union[str, Any] =1
# Mimic fairseq token-to-id alignment for the first 4 tokens
lowercase : List[str] ={'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
lowercase : str =len(self.sp_model )
lowercase : List[Any] ={F'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(UpperCAmelCase__ )
lowercase : int ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : int ):
'''simple docstring'''
lowercase : Optional[int] =self.__dict__.copy()
lowercase : List[Any] =None
lowercase : Tuple =self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Optional[Any] , UpperCAmelCase__ : Any ):
'''simple docstring'''
lowercase : int =d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase : Optional[int] ={}
lowercase : List[Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
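# Special-token layout mirrors fairseq: a single </s> token is prepended to every sequence,
# and sequence pairs are joined with a double </s> separator.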
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
lowercase : List[Any] =[self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase__ ))
return [1] + ([0] * len(UpperCAmelCase__ )) + [1, 1] + ([0] * len(UpperCAmelCase__ ))
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
lowercase : int =[self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : int ={self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : str ):
'''simple docstring'''
return self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ )
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase : List[str] =self.sp_model.PieceToId(UpperCAmelCase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Any ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
lowercase : Dict =''''''.join(UpperCAmelCase__ ).replace(UpperCAmelCase__ , ''' ''' ).strip()
return out_string
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase : Dict =os.path.join(
UpperCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase__ , '''wb''' ) as fi:
lowercase : Optional[int] =self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase__ )
return (out_vocab_file,)
| 88 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 721 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
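# Converts a Mesh-TensorFlow GPTSAN checkpoint to a PyTorch state dict, walking every TF
# variable and mapping it onto the matching module path (MoE experts, attention qkv splits,
# layer norms, embeddings and the final projection).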
def _lowerCAmelCase ( __magic_name__ : str ) -> Union[str, Any]:
lowercase : Union[str, Any] =os.path.join(args.tf_model_dir , '''parameters.json''' )
lowercase : List[str] =json.loads(open(__magic_name__ ).read() )
if not params:
raise ValueError(
f'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith('''.pt''' ):
lowercase : Tuple =args.output + '''.pt'''
lowercase : int =OrderedDict()
with tf.device('''/CPU:0''' ):
lowercase : List[Any] =tf.train.load_checkpoint(args.tf_model_dir )
lowercase : int =reader.get_variable_to_shape_map()
for key_name in shapes.keys():
lowercase : Any =reader.get_tensor(__magic_name__ ).astype(np.floataa )
if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
continue
if key_name.startswith('''pasts/''' ):
if key_name.startswith('''pasts/mlp''' ):
lowercase : int =int(key_name[9] )
elif key_name.startswith('''pasts/out''' ):
lowercase : Union[str, Any] =8
lowercase : Any ='''model.sqout.%d.weight''' % (player * 2) # enters an nn.Sequential with Tanh, so 2 at a time
lowercase : Dict =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : List[str] =torch.tensor(__magic_name__ )
elif key_name.startswith('''model/moe''' ):
lowercase : Union[str, Any] =int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/switch_gating/kernel''' ):
lowercase : Dict ='''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
lowercase : Any =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : List[Any] =torch.tensor(__magic_name__ )
elif key_name.endswith('''/softmlp/kernel''' ):
lowercase : Optional[int] ='''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
lowercase : int =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : Optional[Any] =torch.tensor(__magic_name__ )
elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
lowercase : Union[str, Any] =key_name[-9:-7]
for i in range(16 ):
lowercase : Dict ='''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
lowercase : Any =(
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
lowercase : List[str] =torch.tensor(__magic_name__ )
elif key_name.startswith('''model/mlp''' ):
lowercase : Dict =int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/p1/kernel''' ):
lowercase : Any ='''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
lowercase : str =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : Any =torch.tensor(__magic_name__ )
elif key_name.endswith('''/p1/bias''' ):
lowercase : List[Any] ='''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
lowercase : Union[str, Any] =vnp.copy() # same because it is one dimensional
lowercase : Union[str, Any] =torch.tensor(__magic_name__ )
elif key_name.endswith('''/p2/kernel''' ):
lowercase : int ='''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
lowercase : Tuple =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : int =torch.tensor(__magic_name__ )
elif key_name.endswith('''/p2/bias''' ):
lowercase : str ='''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
lowercase : Optional[int] =vnp.copy() # same because it is one dimensional
lowercase : List[Any] =torch.tensor(__magic_name__ )
elif key_name.startswith('''model/ln''' ):
lowercase : int =int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowercase : Any ='''model.blocks.%d.feed_forward.norm.bias''' % player
lowercase : Optional[int] =vnp.copy() # same because it is one dimensional
lowercase : Union[str, Any] =torch.tensor(__magic_name__ )
elif key_name.endswith('''/g''' ):
lowercase : Optional[Any] ='''model.blocks.%d.feed_forward.norm.weight''' % player
lowercase : Any =vnp.copy() # same because it is one dimensional
lowercase : List[Any] =torch.tensor(__magic_name__ )
elif key_name.startswith('''model/att''' ):
lowercase : int =int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/qkv/kernel''' ):
lowercase : Optional[int] =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
lowercase : Dict =state[:, 0, :, :]
lowercase : Tuple =state[:, 1, :, :]
lowercase : List[Any] =state[:, 2, :, :]
lowercase : Optional[int] =(
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : Optional[Any] =(
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : Optional[int] =(
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : Dict ='''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
lowercase : Dict =torch.tensor(__magic_name__ )
lowercase : List[Any] ='''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
lowercase : Optional[Any] =torch.tensor(__magic_name__ )
lowercase : Optional[Any] ='''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
lowercase : Tuple =torch.tensor(__magic_name__ )
elif key_name.endswith('''/o/kernel''' ):
lowercase : Dict ='''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
lowercase : List[Any] =(
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : str =torch.tensor(__magic_name__ )
elif key_name.startswith('''model/an''' ):
lowercase : Optional[Any] =int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowercase : List[str] ='''model.blocks.%d.self_attn.norm.bias''' % player
lowercase : Union[str, Any] =vnp.copy() # same because it is one dimensional
lowercase : List[str] =torch.tensor(__magic_name__ )
elif key_name.endswith('''/g''' ):
lowercase : Any ='''model.blocks.%d.self_attn.norm.weight''' % player
lowercase : Any =vnp.copy() # same because it is one dimensional
lowercase : Optional[Any] =torch.tensor(__magic_name__ )
elif (
key_name.startswith('''model/wte''' )
or key_name.startswith('''model/wpe''' )
or key_name.startswith('''model/ete''' )
):
lowercase : Any ={'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
lowercase : Optional[Any] ='''model.%s.weight''' % nlayer
lowercase : Optional[int] =vnp.copy() # same in embedded
lowercase : List[Any] =torch.tensor(__magic_name__ )
if key_name.startswith('''model/wte''' ):
lowercase : Tuple ='''lm_head.weight'''
lowercase : str =vnp.copy() # same in embedded
lowercase : Union[str, Any] =torch.tensor(__magic_name__ )
elif key_name.startswith('''model/wob''' ):
lowercase : List[str] ='''final_logits_bias'''
lowercase : Dict =vnp.copy() # same in embedded
lowercase : Tuple =state.reshape((1, -1) )
lowercase : Dict =torch.tensor(__magic_name__ )
elif key_name == "model/dense/kernel":
lowercase : Dict ='''model.last_project.weight'''
lowercase : int =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : Optional[Any] =torch.tensor(__magic_name__ )
elif key_name == "model/dense_1/bias":
lowercase : List[Any] ='''model.last_project.bias'''
lowercase : str =vnp.copy() # same because it is one dimensional
lowercase : Optional[Any] =torch.tensor(__magic_name__ )
torch.save(__magic_name__ , args.output )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser(
description="""model converter.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("""--tf_model_dir""", metavar="""PATH""", type=str, required=True, help="""import model""")
parser.add_argument("""--output""", metavar="""PATH""", type=str, required=True, help="""output model""")
UpperCamelCase_ = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 88 | 0 |
'''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests often fail with OOM errors on GPU.
# The "platform" allocator makes JAX allocate exactly what is needed on demand and deallocate
# memory that is no longer needed, at the cost of slower execution; see
# https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCamelCase_ = """platform"""
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = PegasusConfig
lowerCamelCase_ = {}
lowerCamelCase_ = 'gelu'
def __init__( self : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str]=13 , UpperCAmelCase__ : Union[str, Any]=7 , UpperCAmelCase__ : int=True , UpperCAmelCase__ : int=False , UpperCAmelCase__ : List[str]=99 , UpperCAmelCase__ : Any=32 , UpperCAmelCase__ : Tuple=5 , UpperCAmelCase__ : List[Any]=4 , UpperCAmelCase__ : Optional[int]=37 , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : List[Any]=20 , UpperCAmelCase__ : List[str]=2 , UpperCAmelCase__ : List[Any]=1 , UpperCAmelCase__ : List[Any]=0 , ):
'''simple docstring'''
lowercase : Dict =parent
lowercase : List[Any] =batch_size
lowercase : Union[str, Any] =seq_length
lowercase : List[str] =is_training
lowercase : Tuple =use_labels
lowercase : Optional[int] =vocab_size
lowercase : Optional[int] =hidden_size
lowercase : Dict =num_hidden_layers
lowercase : Optional[Any] =num_attention_heads
lowercase : Optional[Any] =intermediate_size
lowercase : List[str] =hidden_dropout_prob
lowercase : Dict =attention_probs_dropout_prob
lowercase : Tuple =max_position_embeddings
lowercase : str =eos_token_id
lowercase : Any =pad_token_id
lowercase : List[str] =bos_token_id
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : Optional[int] =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
lowercase : Optional[int] =np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
lowercase : Any =np.concatenate([input_ids, eos_tensor] , axis=1 )
lowercase : Dict =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : str =self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowercase : Any =prepare_pegasus_inputs_dict(A__ , A__ , A__ )
return config, inputs_dict
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
lowercase : List[Any] =20
lowercase : Optional[Any] =model_class_name(A__ )
lowercase : Optional[Any] =model.encode(inputs_dict['''input_ids'''] )
lowercase , lowercase : Union[str, Any] =(
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowercase : Optional[Any] =model.init_cache(decoder_input_ids.shape[0] , A__ , A__ )
lowercase : str =jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
lowercase : Optional[Any] =jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowercase : int =model.decode(
decoder_input_ids[:, :-1] , A__ , decoder_attention_mask=A__ , past_key_values=A__ , decoder_position_ids=A__ , )
lowercase : Optional[Any] =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowercase : Any =model.decode(
decoder_input_ids[:, -1:] , A__ , decoder_attention_mask=A__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=A__ , )
lowercase : Any =model.decode(A__ , A__ )
lowercase : str =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict ):
'''simple docstring'''
lowercase : List[Any] =20
lowercase : List[Any] =model_class_name(A__ )
lowercase : Optional[Any] =model.encode(inputs_dict['''input_ids'''] )
lowercase , lowercase : List[str] =(
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowercase : Optional[int] =jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
lowercase : int =model.init_cache(decoder_input_ids.shape[0] , A__ , A__ )
lowercase : Optional[Any] =jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowercase : List[Any] =model.decode(
decoder_input_ids[:, :-1] , A__ , decoder_attention_mask=A__ , past_key_values=A__ , decoder_position_ids=A__ , )
lowercase : Dict =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowercase : Union[str, Any] =model.decode(
decoder_input_ids[:, -1:] , A__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=A__ , decoder_position_ids=A__ , )
lowercase : Union[str, Any] =model.decode(A__ , A__ , decoder_attention_mask=A__ )
lowercase : int =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
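# Builds default attention masks from the pad token when the caller does not supply them.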
def _lowerCAmelCase ( __magic_name__ : Union[str, Any] , __magic_name__ : str , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int]=None , __magic_name__ : int=None , ):
if attention_mask is None:
lowercase : Any =np.not_equal(__magic_name__ , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
lowercase : Tuple =np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class __SCREAMING_SNAKE_CASE ( __a , unittest.TestCase ):
lowerCamelCase_ = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
lowerCamelCase_ = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest('''JIT Enabled'''):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest('''JIT Disabled'''):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict['''input_ids'''], inputs_dict['''attention_mask'''])
                prepared_inputs_dict = {
                    '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
                    '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
                    '''encoder_outputs''': encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, )

                with self.subTest('''JIT Enabled'''):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest('''JIT Disabled'''):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''google/pegasus-large''', from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained('''google/pegasus-xsum''')
        tokenizer = PegasusTokenizer.from_pretrained('''google/pegasus-xsum''')
        src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
        tgt_text = [
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
        inputs = tokenizer(src_text, return_tensors='''np''', truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
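        # Minimal sketch (editorial) of the cached-decoding pattern the checks above exercise:
        #   encoder_outputs = model.encode(inputs_dict['input_ids'])
        #   past = model.init_cache(batch_size, max_decoder_length, encoder_outputs)
        #   step = model.decode(decoder_input_ids[:, -1:], encoder_outputs, past_key_values=past)
        # Decoding one token at a time with the cache must agree with a full forward pass.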
| 700 |
'''simple docstring'''
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ = """▁"""
UpperCamelCase_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = '''<s>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '''<unk>''')
        self.assertEqual(vocab_keys[1], '''<s>''')
        self.assertEqual(vocab_keys[-1], '''[MASK]''')
        self.assertEqual(len(vocab_keys), 1004)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize('''This is a test''')
        self.assertListEqual(tokens, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382], )
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    @cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = '''Hello World!'''
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch
        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = ''' '''.join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors='''pt''', return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + ''' ''' + sequence], return_tensors='''pt''', return_token_type_ids=False)
        config = BigBirdConfig(attention_type='''original_full''')
        model = BigBirdModel(config)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_special_tokens(self):
        tokenizer = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
        decoded_text = tokenizer.decode(tokenizer('''Paris is the [MASK].''').input_ids)
        self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''')
@slow
    def test_tokenizer_integration(self):
        # fmt: off
lowercase : str ={'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = lowercase  # alias for the expected-encoding dict defined above
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='''google/bigbird-roberta-base''', revision='''215c99f1600e06f83acce68422f2035b2b5c3510''', )
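        # Illustrative round-trip with the production checkpoint (editorial sketch; ids 65/66
        # are the [CLS]/[SEP] markers visible in the expected encodings above):
        #   tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
        #   ids = tokenizer('Paris is the [MASK].').input_ids
        #   text = tokenizer.decode(ids)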
| 88 | 0 |
'''simple docstring'''
def interpolation_search(sorted_collection, item):
    """Search `item` in an ascending `sorted_collection`; return its index or None."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid division by zero during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    # avoid division by zero during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right)
def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError('''Collection must be ascending sorted''')
    return True
if __name__ == "__main__":
import sys
UpperCamelCase_ = 0
if debug == 1:
UpperCamelCase_ = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("""Sequence must be ascending sorted to apply interpolation search""")
UpperCamelCase_ = 67
UpperCamelCase_ = interpolation_search(collection, target)
if result is not None:
print(f'''{target} found at positions: {result}''')
else:
print("""Not found""")
| 701 |
'''simple docstring'''
def binomial_coefficient(n, r):
    """Compute C(n, r) with a rolling Pascal's-triangle row in O(r) space."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute the current row from the previous row
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
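# Sanity check (editorial, not part of the original snippet): the rolling-row value
# must match the closed form C(10, 5) = 10! / (5! * 5!) = 252, e.g.:
#   import math; assert binomial_coefficient(n=10, r=5) == math.comb(10, 5) == 252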
| 88 | 0 |
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url='''https://knn.laion.ai/knn-service''', indice_name='''laion_400m''', num_images=num_images, aesthetic_weight=0.1 )
    os.makedirs(f'''{class_data_dir}/images''', exist_ok=True)
    if len(list(Path(f'''{class_data_dir}/images''').iterdir())) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1E4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url='''https://knn.laion.ai/knn-service''', indice_name='''laion_400m''', num_images=num_images, aesthetic_weight=0.1, )
    count = 0
    total = 0
    pbar = tqdm(desc='''downloading real regularization images''', total=num_class_images)
    with open(f'''{class_data_dir}/caption.txt''', '''w''') as f1, open(f'''{class_data_dir}/urls.txt''', '''w''') as f2, open(
        f'''{class_data_dir}/images.txt''', '''w''') as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images['''url'''])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))  # validate the payload is a decodable image
                    with open(f'''{class_data_dir}/images/{total}.jpg''', '''wb''') as f:
                        f.write(img.content)
                    f1.write(images['''caption'''] + '''\n''')
                    f2.write(images['''url'''] + '''\n''')
                    f3.write(f'''{class_data_dir}/images/{total}.jpg''' + '''\n''')
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    parser = argparse.ArgumentParser('''''', add_help=False)
    parser.add_argument('''--class_prompt''', help='''text prompt to retrieve images''', required=True, type=str)
    parser.add_argument('''--class_data_dir''', help='''path to save images''', required=True, type=str)
    parser.add_argument('''--num_class_images''', help='''number of images to download''', default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
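# Example invocation (editorial; file name and paths are illustrative only):
#   python retrieve.py --class_prompt "a photo of a dog" \
#       --class_data_dir ./real_reg/dog --num_class_images 200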
| 702 |
'''simple docstring'''
from collections import defaultdict
def check_anagrams(first_str, second_str):
    """Return True if the two strings are anagrams (case- and whitespace-insensitive)."""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(''' ''', '''''')
    second_str = second_str.replace(''' ''', '''''')

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character position, increment the count for the first string
    # and decrement it for the second; for anagrams everything cancels out.
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
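# Editorial note: the single defaultdict pass is O(n) -- incrementing counts for the
# first string and decrementing for the second leaves every count at zero exactly
# when the two strings are anagrams, avoiding building two separate histograms.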
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCamelCase_ = input("""Enter the first string """).strip()
UpperCamelCase_ = input("""Enter the second string """).strip()
UpperCamelCase_ = check_anagrams(input_a, input_b)
print(f'''{input_a} and {input_b} are {'' if status else 'not '}anagrams.''')
| 88 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["""note_seq"""]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['''note_seq'''])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['''note_seq'''])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['''note_seq'''])
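# Editorial note: this is the standard lazy "dummy object" pattern -- importing the class
# always succeeds, and only instantiating it (or calling its constructors) raises the
# requires_backends error telling the user to install the missing `note_seq` backend.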
| 703 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = 'tokenizer_file'
    special_tokens_map = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'}
    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''')
        tokenizer.save_pretrained(self.tmpdirname)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def test_encodings_from_sample_data(self):
        tokenizer = self.get_rust_tokenizer()
        INPUT_SENTENCES = ['''The quick brown fox</s>''', '''jumps over the lazy dog</s>''']
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)['''input_ids''']
        self.assertListEqual(TARGET_TOKENS, computed_tokens)
        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, INPUT_SENTENCES)
    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = '''This is a simple input'''
                s2 = ['''This is a simple input 1''', '''This is a simple input 2''']
                p = ('''This is a simple input''', '''This is a pair''')
                p2 = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)
                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail('''Bloom Tokenizer should be able to deal with padding''')
                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding='''max_length''')
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding='''max_length''')
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding='''max_length''', )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding='''max_length''')
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding='''max_length''')
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding='''max_length''', )
    def test_encodings_from_xnli_dataset(self):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset('''xnli''', '''all_languages''', split='''test''', streaming=True)
        sample_data = next(iter(ds))['''premise''']  # pick up one data
        input_text = list(sample_data.values())
        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)
    def test_pretrained_model_lists(self):
        # The parent test has to be overridden because BLOOM uses ALiBi positional embeddings, which do
        # not impose any sequence-length constraint; the original version relies on the maximum
        # sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
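        # Illustrative usage of the checkpoint under test (editorial sketch):
        #   tokenizer = BloomTokenizerFast.from_pretrained('bigscience/tokenizer')
        #   ids = tokenizer('The quick brown fox</s>')['input_ids']   # cf. TARGET_TOKENS above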
| 88 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=32, )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            """prompt""": """a photo of the dolomites""",
            """generator""": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            """height""": None,
            """width""": None,
            """num_inference_steps""": 1,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs
    def test_stable_diffusion_panorama_default_case(self):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.61_86, 0.53_74, 0.49_15, 0.41_35, 0.41_14, 0.45_63, 0.51_28, 0.49_77, 0.47_57])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25E-3)
    def test_stable_diffusion_panorama_negative_prompt(self):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = """french fries"""
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
    def test_stable_diffusion_panorama_views_batch(self):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
    def test_stable_diffusion_panorama_euler(self):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["""scheduler"""] = EulerAncestralDiscreteScheduler(
            beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='''scaled_linear''')
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.40_24, 0.65_10, 0.49_01, 0.53_78, 0.58_13, 0.56_22, 0.47_95, 0.44_67, 0.49_52])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
    def test_stable_diffusion_panorama_pndm(self):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["""scheduler"""] = PNDMScheduler(
            beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='''scaled_linear''', skip_prk_steps=True)
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.63_91, 0.62_91, 0.48_61, 0.51_34, 0.55_52, 0.45_78, 0.50_32, 0.50_23, 0.45_39])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            """prompt""": """a photo of the dolomites""",
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs
    def test_stable_diffusion_panorama_ddim(self):
        model_ckpt = """stabilityai/stable-diffusion-2-base"""
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder='''scheduler''')
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)
        expected_slice = np.array(
[
0.36_96_83_92,
0.27_02_53_72,
0.32_44_67_66,
0.28_37_93_87,
0.36_36_32_74,
0.30_73_33_47,
0.27_10_00_27,
0.27_05_41_25,
0.25_53_60_96,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            '''stabilityai/stable-diffusion-2-base''', safety_checker=None)
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)
        expected_slice = np.array(
            [
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
[
0.18_68_18_69,
0.33_90_78_16,
0.5_36_12_76,
0.14_43_28_65,
-0.02_85_66_11,
-0.73_94_11_23,
0.23_39_79_87,
0.47_32_26_82,
-0.37_82_31_64,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
[
0.18_53_96_45,
0.33_98_72_48,
0.5_37_85_59,
0.14_43_71_42,
-0.02_45_52_61,
-0.7_33_83_17,
0.23_99_07_55,
0.47_35_62_72,
-0.3_78_65_05,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
        callback_fn.has_been_called = False
        model_ckpt = """stabilityai/stable-diffusion-2-base"""
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder='''scheduler''')
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
assert callback_fn.has_been_called
assert number_of_steps == 3
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        model_ckpt = """stabilityai/stable-diffusion-2-base"""
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder='''scheduler''')
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        inputs = self.get_inputs()
        _ = pipe(**inputs)
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
| 704 |
'''simple docstring'''
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """Calculate real power (W) from apparent power (VA) and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('''power_factor must be a valid float value between -1 and 1.''')
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Calculate reactive power (VAR) from apparent power (VA) and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('''power_factor must be a valid float value between -1 and 1.''')
    return apparent_power * math.sqrt(1 - power_factor**2)
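# Worked example (editorial): for an apparent power of 100 VA at power factor 0.8,
# real_power(100, 0.8) == 80.0 and reactive_power(100, 0.8) == 100 * sqrt(1 - 0.8**2),
# i.e. approximately 60.0.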
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88 | 0 |
'''simple docstring'''
def solution(limit: int = 50000000) -> int:
    """Count the numbers below `limit` expressible as prime^2 + prime^3 + prime^4."""
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        # sieve out the odd multiples of p
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))
    for prime_1 in primes:
        square = prime_1 * prime_1
        for prime_2 in primes:
            cube = prime_2 * prime_2 * prime_2
            if square + cube >= limit - 16:
                break
            for prime_3 in primes:
                tetr = prime_3 * prime_3 * prime_3 * prime_3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)
    return len(ret)
if __name__ == "__main__":
print(f'''{solution() = }''')
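# For reference (editorial): with the default limit of 50,000,000 this is Project Euler
# problem 87, whose commonly cited answer -- and the count solution() is expected to
# return -- is 1097343.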
| 705 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            '''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use CLIPImageProcessor instead.''', FutureWarning, )
        super().__init__(*args, **kwargs)
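# Usage is unchanged from CLIPImageProcessor; instantiating the shim only adds a
# FutureWarning, e.g. (illustrative):
#   feature_extractor = CLIPFeatureExtractor.from_pretrained('openai/clip-vit-base-patch32')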
| 88 | 0 |
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """Factory used by `set_defaults` below to build a ConvertCommand from parsed args."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name )
UpperCamelCase_ = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command to the given argparse sub-parsers."""
        train_parser = parser.add_parser(
'''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , )
train_parser.add_argument('''--model_type''' , type=UpperCAmelCase__ , required=UpperCAmelCase__ , help='''Model\'s type.''' )
train_parser.add_argument(
'''--tf_checkpoint''' , type=UpperCAmelCase__ , required=UpperCAmelCase__ , help='''TensorFlow checkpoint path or folder.''' )
train_parser.add_argument(
'''--pytorch_dump_output''' , type=UpperCAmelCase__ , required=UpperCAmelCase__ , help='''Path to the PyTorch saved model output.''' )
train_parser.add_argument('''--config''' , type=UpperCAmelCase__ , default='''''' , help='''Configuration file path or folder.''' )
train_parser.add_argument(
'''--finetuning_task_name''' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(
        self, model_type: str, tf_checkpoint: str, pytorch_dump_output: str, config: str, finetuning_task_name: str, *args, ):
        self._logger = logging.get_logger('''transformers-cli/converting''')
        self._logger.info(F'''Loading model {model_type}''')
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
if "ckpt" in self._tf_checkpoint.lower():
lowercase : List[Any] =self._tf_checkpoint
lowercase : List[str] =''''''
else:
lowercase : List[str] =self._tf_checkpoint
lowercase : Optional[Any] =''''''
convert_transfo_xl_checkpoint_to_pytorch(
UpperCAmelCase__ , self._config , self._pytorch_dump_output , UpperCAmelCase__ )
elif self._model_type == "gpt2":
try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCAmelCase__ )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
'''--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]''' )
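# Example invocation (editorial; paths are illustrative only):
#   transformers-cli convert --model_type bert --tf_checkpoint ./bert_model.ckpt \
#       --config ./bert_config.json --pytorch_dump_output ./pytorch_model.bin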
| 706 |
'''simple docstring'''
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
UpperCamelCase_ = parser.parse_args()
if args.model_type == "roberta":
UpperCamelCase_ = RobertaForMaskedLM.from_pretrained(args.model_name)
UpperCamelCase_ = """roberta"""
elif args.model_type == "gpt2":
UpperCamelCase_ = GPTaLMHeadModel.from_pretrained(args.model_name)
UpperCamelCase_ = """transformer"""
UpperCamelCase_ = model.state_dict()
UpperCamelCase_ = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
UpperCamelCase_ = state_dict[f'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
UpperCamelCase_ = f'''{prefix}.embeddings.{w}.weight'''
UpperCamelCase_ = state_dict[param_name]
for w in ["weight", "bias"]:
UpperCamelCase_ = f'''{prefix}.embeddings.LayerNorm.{w}'''
UpperCamelCase_ = state_dict[param_name]
# Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f'''{prefix}.h.{std_idx}.{layer}.{w}'''] = state_dict[
                        f'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
                    ]
            compressed_sd[f'''{prefix}.h.{std_idx}.attn.bias'''] = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias''']
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f'''{prefix}.encoder.layer.{std_idx}.{layer}.{w}'''] = state_dict[
                        f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
                    ]
        std_idx += 1
    # Language Modeling Head #
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f'''{layer}'''] = state_dict[f'''{layer}''']
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f'''lm_head.dense.{w}'''] = state_dict[f'''lm_head.dense.{w}''']
                compressed_sd[f'''lm_head.layer_norm.{w}'''] = state_dict[f'''lm_head.layer_norm.{w}''']
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f'''{prefix}.ln_f.{w}'''] = state_dict[f'''{prefix}.ln_f.{w}''']
        compressed_sd["""lm_head.weight"""] = state_dict["""lm_head.weight"""]
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
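# Example invocation (editorial; script name and values are illustrative only):
#   python extract_for_distillation.py --model_type roberta --model_name roberta-large \
#       --dump_checkpoint serialization_dir/tf_roberta_048131723.pth --vocab_transform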
| 88 | 0 |
'''simple docstring'''
from math import log2
def lowest_set_bit_index(a: int) -> int:
    """Return the zero-based index of the lowest set bit of `a` (0 when a == 0)."""
    if a < 0:
        raise ValueError('''Input value must be a positive integer''')
    elif isinstance(a, float):
        raise TypeError('''Input value must be a \'int\' type''')
    return 0 if (a == 0) else int(log2(a & -a))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params(module):
    for param in module.parameters():
        param.requires_grad = False
def get_device():
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = '''mps'''
    if device == "mps":
        print(
            '''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
            ''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
            ''' with generations.''' )
    return device
def show_image(image):
    # `plt.imshow` returns an AxesImage; hide both axes before displaying.
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()
def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime('''%H:%M:%S''')
    return timestamp
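# Minimal usage sketch (editorial; assumes a torch.nn.Module named `net` exists):
#   device = get_device()
#   freeze_params(net)        # parameters stop receiving gradients
#   print(get_timestamp())    # e.g. '14:03:59'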
| 88 | 0 |
'''simple docstring'''
def decimal_to_binary(num: int) -> str:
    """Convert an integer to its binary representation as a '0b'-prefixed string.

    >>> decimal_to_binary(0)
    '0b0'
    >>> decimal_to_binary(2)
    '0b10'
    >>> decimal_to_binary(7)
    '0b111'
    >>> decimal_to_binary(-2)
    '-0b10'
    """
    if isinstance(num, float):
        raise TypeError('''\'float\' object cannot be interpreted as an integer''')
    if isinstance(num, str):
        raise TypeError('''\'str\' object cannot be interpreted as an integer''')
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 708 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
        begin_error_msg = ''' '''.join(str(e).split(''' ''')[:-1])
        full_error_msg = ''''''
        depreciated_args = eval(str(e).split(''' ''')[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    benchmark.run()
if __name__ == "__main__":
main()
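# Example invocation (editorial; the flags below are standard TensorFlowBenchmarkArguments):
#   python run_benchmark_tf.py --models bert-base-cased --batch_sizes 8 --sequence_lengths 128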
| 88 | 0 |
'''simple docstring'''
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch."""

    model_name_or_path: Optional[str] = field(
        default=None, metadata={
            'help': (
                'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'
            )
        }, )
    model_type: Optional[str] = field(
        default=None, metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES)}, )
    config_overrides: Optional[str] = field(
        default=None, metadata={
            'help': (
                'Override some existing default config settings when a model is trained from scratch. Example: '
                'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
            )
        }, )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'}, )
    model_revision: str = field(
        default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}, )
    use_auth_token: bool = field(
        default=False, metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        }, )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                '''--config_overrides can\'t be used in combination with --config_name or --model_name_or_path''' )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a text file).'} )
    validation_file: Optional[str] = field(
        default=None, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'}, )
    train_ref_file: Optional[str] = field(
        default=None, metadata={'help': 'An optional input train ref data file for whole word masking in Chinese.'}, )
    validation_ref_file: Optional[str] = field(
        default=None, metadata={'help': 'An optional input validation ref data file for whole word masking in Chinese.'}, )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'} )
    validation_split_percentage: Optional[int] = field(
        default=5, metadata={
            'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
        }, )
    max_seq_length: Optional[int] = field(
        default=None, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated. Default to the max input length of the model.'
            )
        }, )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={'help': 'The number of processes to use for the preprocessing.'}, )
    mlm_probability: float = field(
        default=0.1_5, metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
    pad_to_max_length: bool = field(
        default=False, metadata={
            'help': (
                'Whether to pad all samples to `max_seq_length`. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
            )
        }, )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split('''.''')[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split('''.''')[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    # The ref file is JSON lines: one list of whole-word boundaries per example.
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)
    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    # The whole-word-masking collator looks for the "chinese_ref" column.
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def _lowerCAmelCase ( ) -> int:
lowercase : Tuple =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase , lowercase , lowercase : int =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase , lowercase , lowercase : List[Any] =parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
lowercase : List[Any] =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase : int =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __magic_name__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowercase : Tuple =load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
lowercase : Optional[Any] =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f'''train[:{data_args.validation_split_percentage}%]''' , )
lowercase : Optional[Any] =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f'''train[{data_args.validation_split_percentage}%:]''' , )
else:
lowercase : int ={}
if data_args.train_file is not None:
lowercase : Tuple =data_args.train_file
if data_args.validation_file is not None:
lowercase : int =data_args.validation_file
lowercase : str =data_args.train_file.split('''.''' )[-1]
if extension == "txt":
lowercase : Tuple ='''text'''
lowercase : Tuple =load_dataset(__magic_name__ , data_files=__magic_name__ )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase : Tuple ={
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name:
lowercase : Dict =AutoConfig.from_pretrained(model_args.config_name , **__magic_name__ )
elif model_args.model_name_or_path:
lowercase : Union[str, Any] =AutoConfig.from_pretrained(model_args.model_name_or_path , **__magic_name__ )
else:
lowercase : Optional[Any] =CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(f'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(f'''New config: {config}''' )
lowercase : Tuple ={
'''cache_dir''': model_args.cache_dir,
'''use_fast''': model_args.use_fast_tokenizer,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
lowercase : Dict =AutoTokenizer.from_pretrained(model_args.tokenizer_name , **__magic_name__ )
elif model_args.model_name_or_path:
lowercase : List[Any] =AutoTokenizer.from_pretrained(model_args.model_name_or_path , **__magic_name__ )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.''' )
if model_args.model_name_or_path:
lowercase : Dict =AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__magic_name__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('''Training new model from scratch''' )
lowercase : List[str] =AutoModelForMaskedLM.from_config(__magic_name__ )
model.resize_token_embeddings(len(__magic_name__ ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
lowercase : str =datasets['''train'''].column_names
else:
lowercase : Union[str, Any] =datasets['''validation'''].column_names
lowercase : Optional[Any] ='''text''' if '''text''' in column_names else column_names[0]
lowercase : List[str] ='''max_length''' if data_args.pad_to_max_length else False
def tokenize_function(__magic_name__ : Tuple ):
# Remove empty lines
lowercase : Union[str, Any] =[line for line in examples['''text'''] if len(__magic_name__ ) > 0 and not line.isspace()]
return tokenizer(examples['''text'''] , padding=__magic_name__ , truncation=__magic_name__ , max_length=data_args.max_seq_length )
lowercase : List[Any] =datasets.map(
__magic_name__ , batched=__magic_name__ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
lowercase : List[Any] =add_chinese_references(tokenized_datasets['''train'''] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
lowercase : Union[str, Any] =add_chinese_references(
tokenized_datasets['''validation'''] , data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
lowercase : int =data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
lowercase : Union[str, Any] =False
# Data collator
# This one will take care of randomly masking the tokens.
lowercase : List[str] =DataCollatorForWholeWordMask(tokenizer=__magic_name__ , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
lowercase : List[Any] =Trainer(
model=__magic_name__ , args=__magic_name__ , train_dataset=tokenized_datasets['''train'''] if training_args.do_train else None , eval_dataset=tokenized_datasets['''validation'''] if training_args.do_eval else None , tokenizer=__magic_name__ , data_collator=__magic_name__ , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
lowercase : Any =last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
lowercase : Dict =model_args.model_name_or_path
else:
lowercase : str =None
lowercase : Optional[Any] =trainer.train(resume_from_checkpoint=__magic_name__ )
trainer.save_model() # Saves the tokenizer too for easy upload
lowercase : List[str] =os.path.join(training_args.output_dir , '''train_results.txt''' )
if trainer.is_world_process_zero():
with open(__magic_name__ , '''w''' ) as writer:
logger.info('''***** Train results *****''' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(f''' {key} = {value}''' )
writer.write(f'''{key} = {value}\n''' )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# Evaluation
lowercase : List[str] ={}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowercase : int =trainer.evaluate()
lowercase : Dict =math.exp(eval_output['''eval_loss'''] )
lowercase : List[Any] =perplexity
lowercase : Optional[int] =os.path.join(training_args.output_dir , '''eval_results_mlm_wwm.txt''' )
if trainer.is_world_process_zero():
with open(__magic_name__ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in sorted(results.items() ):
logger.info(f''' {key} = {value}''' )
writer.write(f'''{key} = {value}\n''' )
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
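# A sketch of the argument plumbing that drives main(); the concrete values
# below are placeholders, not from the source:
#
#     from transformers import HfArgumentParser, TrainingArguments
#
#     parser = HfArgumentParser(TrainingArguments)
#     (training_args,) = parser.parse_args_into_dataclasses(
#         args=["--output_dir", "./wwm-out", "--do_train"]
#     )
#     assert training_args.do_train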
| 709 |
'''simple docstring'''
from __future__ import annotations
def solution(maze: list[list[int]]) -> bool:
    """Backtracking search for a path from (0, 0) to the bottom-right cell.

    0 marks a free cell, 1 marks a blocked cell.
    """
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Recursive step: tries to extend the path through cell (i, j)."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True
            solutions[i][j] = 0  # backtrack
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
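    # Example run: a 2x2 grid where 1 marks a blocked cell; this prints the
    # path matrix ([1, 0] then [1, 1]) and returns True.
    solution([[0, 1], [0, 0]])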
| 88 | 0 |
'''simple docstring'''
def heaps(arr: list) -> list:
    """Returns all permutations of `arr` via Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even: swap positions i and k - 1
                arr[k - 1], arr[i] = arr[i], arr[k - 1]
            else:  # k is odd: swap positions 0 and k - 1
                arr[k - 1], arr[0] = arr[0], arr[k - 1]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
print(heaps(arr))
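    # Quick sanity check of the order Heap's algorithm visits permutations in:
    assert heaps([1, 2, 3]) == [
        (1, 2, 3), (2, 1, 3), (3, 1, 2),
        (1, 3, 2), (2, 3, 1), (3, 2, 1),
    ]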
| 710 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ):
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
lowercase : Any =DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )
@torch.no_grad()
def __call__( self : List[Any] , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : int = 50 , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , ):
'''simple docstring'''
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , UpperCAmelCase__ ):
lowercase : Optional[int] =(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
lowercase : Optional[int] =(batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and len(UpperCAmelCase__ ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(UpperCAmelCase__ )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowercase : str =randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(UpperCAmelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowercase : Dict =self.unet(UpperCAmelCase__ , UpperCAmelCase__ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
lowercase : Dict =self.scheduler.step(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , eta=UpperCAmelCase__ , use_clipped_model_output=UpperCAmelCase__ , generator=UpperCAmelCase__ ).prev_sample
lowercase : Optional[Any] =(image / 2 + 0.5).clamp(0 , 1 )
lowercase : Optional[Any] =image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase : List[str] =self.numpy_to_pil(UpperCAmelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCAmelCase__ )
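if __name__ == "__main__":
    # Hypothetical usage, assuming this class mirrors diffusers' DDIMPipeline
    # and that the google/ddpm-cifar10-32 weights are available locally or on
    # the Hub:
    from diffusers import DDIMPipeline

    pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
    image = pipe(num_inference_steps=50, eta=0.0).images[0]
    image.save("ddim_sample.png")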
| 88 | 0 |
'''simple docstring'''
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: returns the primes below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Counts the semiprimes (products of exactly two primes) below max_number."""
    prime_numbers = calculate_prime_numbers(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    # Two-pointer scan: for each smallest factor, count valid largest factors.
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
if __name__ == "__main__":
print(f'''{solution() = }''')
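    # Hand check of the two-pointer count: the semiprimes below 30 are
    # 4, 6, 9, 10, 14, 15, 21, 22, 25 and 26, i.e. ten of them.
    assert solution(30) == 10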
| 711 |
'''simple docstring'''
import argparse
import copy
def _lowerCAmelCase ( __magic_name__ : List[str] ) -> Union[str, Any]:
lowercase : int ={}
with open(__magic_name__ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
lowercase : List[str] =[]
_list.append([line.split()[1], line.split()[2]] )
lowercase : Tuple =_list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
lowercase : List[Any] =[]
_list.append([line.split()[0], line.split()[2]] )
lowercase : Union[str, Any] =_list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def _lowerCAmelCase ( __magic_name__ : Optional[int] , __magic_name__ : List[Any] ) -> str:
with open(__magic_name__ ) as f:
lowercase : Optional[int] =f.read(1 )
lowercase : List[Any] =start_node
lowercase : List[Any] =[]
lowercase : str =start_node
lowercase : str =0
while visiting not in first_solution:
lowercase : Optional[int] =10000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(__magic_name__ ) and k[0] not in first_solution:
lowercase : List[Any] =k[1]
lowercase : str =k[0]
first_solution.append(__magic_name__ )
lowercase : Any =distance_of_first_solution + int(__magic_name__ )
lowercase : Optional[int] =best_node
first_solution.append(__magic_name__ )
lowercase : str =0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
lowercase : str =(
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 10000
)
return first_solution, distance_of_first_solution
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : Any ) -> Tuple:
lowercase : Tuple =[]
for n in solution[1:-1]:
lowercase : Dict =solution.index(__magic_name__ )
for kn in solution[1:-1]:
lowercase : Tuple =solution.index(__magic_name__ )
if n == kn:
continue
lowercase : Union[str, Any] =copy.deepcopy(__magic_name__ )
lowercase : Optional[int] =kn
lowercase : List[Any] =n
lowercase : List[Any] =0
for k in _tmp[:-1]:
lowercase : Optional[int] =_tmp[_tmp.index(__magic_name__ ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
lowercase : Optional[int] =distance + int(i[1] )
_tmp.append(__magic_name__ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
lowercase : Union[str, Any] =len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda __magic_name__ : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : Dict ) -> Union[str, Any]:
lowercase : str =1
lowercase : List[Any] =first_solution
lowercase : Any =[]
lowercase : str =distance_of_first_solution
lowercase : str =solution
while count <= iters:
lowercase : Union[str, Any] =find_neighborhood(__magic_name__ , __magic_name__ )
lowercase : Dict =0
lowercase : int =neighborhood[index_of_best_solution]
lowercase : Optional[int] =len(__magic_name__ ) - 1
lowercase : List[Any] =False
while not found:
lowercase : List[Any] =0
while i < len(__magic_name__ ):
if best_solution[i] != solution[i]:
lowercase : List[str] =best_solution[i]
lowercase : Dict =solution[i]
break
lowercase : Any =i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
lowercase : str =True
lowercase : int =best_solution[:-1]
lowercase : Any =neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
lowercase : Optional[int] =cost
lowercase : str =solution
else:
lowercase : Optional[int] =index_of_best_solution + 1
lowercase : List[Any] =neighborhood[index_of_best_solution]
if len(__magic_name__ ) >= size:
tabu_list.pop(0 )
lowercase : Optional[int] =count + 1
return best_solution_ever, best_cost
def _lowerCAmelCase ( __magic_name__ : str=None ) -> Tuple:
lowercase : List[str] =generate_neighbours(args.File )
lowercase , lowercase : Optional[Any] =generate_first_solution(
args.File , __magic_name__ )
lowercase , lowercase : int =tabu_search(
__magic_name__ , __magic_name__ , __magic_name__ , args.Iterations , args.Size , )
print(f'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser(description="""Tabu Search""")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
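    # The expected input file is a whitespace-separated edge list, one edge
    # per line as "<node> <node> <distance>"; note that
    # generate_first_solution() seeds the tour with f.read(1), i.e. the first
    # character of the file. A hypothetical example:
    #
    #     a b 20
    #     a c 18
    #     b c 10
    #     b d 22
    #     c d 12
    #     a d 35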
| 88 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""google/bit-50""": """https://huggingface.co/google/bit-50/resolve/main/config.json""",
}
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase__ , lowerCAmelCase__ ):
lowerCamelCase_ = "bit"
lowerCamelCase_ = ["preactivation", "bottleneck"]
lowerCamelCase_ = ["SAME", "VALID"]
def __init__( self : int , UpperCAmelCase__ : Dict=3 , UpperCAmelCase__ : Tuple=64 , UpperCAmelCase__ : List[Any]=[256, 512, 1024, 2048] , UpperCAmelCase__ : Optional[int]=[3, 4, 6, 3] , UpperCAmelCase__ : List[str]="preactivation" , UpperCAmelCase__ : Union[str, Any]="relu" , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : Union[str, Any]=32 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : List[str]=32 , UpperCAmelCase__ : Any=1 , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : Dict=None , **UpperCAmelCase__ : List[Any] , ):
'''simple docstring'''
super().__init__(**_lowerCamelCase )
if layer_type not in self.layer_types:
raise ValueError(F'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
lowercase : int =global_padding.upper()
else:
raise ValueError(F'''Padding strategy {global_padding} not supported''' )
lowercase : Any =num_channels
lowercase : int =embedding_size
lowercase : str =hidden_sizes
lowercase : List[str] =depths
lowercase : Union[str, Any] =layer_type
lowercase : int =hidden_act
lowercase : Union[str, Any] =global_padding
lowercase : Any =num_groups
lowercase : Any =drop_path_rate
lowercase : Optional[int] =embedding_dynamic_padding
lowercase : Tuple =output_stride
lowercase : int =width_factor
lowercase : int =['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(_lowerCamelCase ) + 1 )]
lowercase , lowercase : Dict =get_aligned_output_features_output_indices(
out_features=_lowerCamelCase , out_indices=_lowerCamelCase , stage_names=self.stage_names )
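if __name__ == "__main__":
    # Quick smoke test, assuming this class mirrors transformers.BitConfig
    # (the google/bit-50 checkpoint referenced above):
    from transformers import BitConfig

    config = BitConfig(layer_type="preactivation", global_padding="SAME")
    print(config.hidden_sizes)  # [256, 512, 1024, 2048]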
| 712 |
'''simple docstring'''
def solution(limit: int = 1000000) -> int:
    """Sums Euler's totient phi(n) for 2 <= n <= limit using a prime sieve."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    # Start phi[n] at n, then apply the factor (1 - 1/p) for each prime p | n.
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
if __name__ == "__main__":
print(f'''{solution() = }''')
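    # Hand check: phi(2..8) = 1, 2, 2, 4, 2, 6, 4, which sums to 21. With the
    # default limit this counts the reduced proper fractions n/d for
    # d <= 10**6, as in Project Euler problem 72.
    assert solution(8) == 21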
| 88 | 0 |
'''simple docstring'''
from typing import Any
def _lowerCAmelCase ( __magic_name__ : list , __magic_name__ : list , __magic_name__ : dict , __magic_name__ : dict , __magic_name__ : dict , ) -> list:
_validation(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , )
# Creates data structures and fill initial step
lowercase : dict ={}
lowercase : dict ={}
for state in states_space:
lowercase : List[Any] =observations_space[0]
lowercase : str =(
initial_probabilities[state] * emission_probabilities[state][observation]
)
lowercase : str =None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(__snake_case ) ):
lowercase : Any =observations_space[o]
lowercase : Tuple =observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
lowercase : Tuple =""
lowercase : Union[str, Any] =-1
for k_state in states_space:
lowercase : int =(
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
lowercase : str =probability
lowercase : Tuple =k_state
# Update probabilities and pointers dicts
lowercase : List[str] =(
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
lowercase : List[str] =arg_max
# The final observation
lowercase : str =observations_space[len(__snake_case ) - 1]
# argmax for given final observation
lowercase : Optional[int] =""
lowercase : List[str] =-1
for k_state in states_space:
lowercase : List[str] =probabilities[(k_state, final_observation)]
if probability > max_probability:
lowercase : List[str] =probability
lowercase : int =k_state
lowercase : Any =arg_max
# Process pointers backwards
lowercase : int =last_state
lowercase : List[str] =[]
for o in range(len(__snake_case ) - 1 , -1 , -1 ):
result.append(__snake_case )
lowercase : List[str] =pointers[previous, observations_space[o]]
result.reverse()
return result
def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : Any , __magic_name__ : Any , __magic_name__ : Any , __magic_name__ : Any , ) -> None:
_validate_not_empty(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , )
_validate_lists(__snake_case , __snake_case )
_validate_dicts(
__snake_case , __snake_case , __snake_case )
def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : Any , __magic_name__ : Any , __magic_name__ : Any , __magic_name__ : Any , ) -> None:
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('''There\'s an empty parameter''' )
def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : Any ) -> None:
_validate_list(__snake_case , '''observations_space''' )
_validate_list(__snake_case , '''states_space''' )
def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : str ) -> None:
if not isinstance(_object , __snake_case ):
lowercase : Optional[int] =f'''{var_name} must be a list'''
raise ValueError(__snake_case )
else:
for x in _object:
if not isinstance(__snake_case , __snake_case ):
lowercase : Any =f'''{var_name} must be a list of strings'''
raise ValueError(__snake_case )
def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : Any , __magic_name__ : Any , ) -> None:
_validate_dict(__snake_case , '''initial_probabilities''' , __snake_case )
_validate_nested_dict(__snake_case , '''transition_probabilities''' )
_validate_nested_dict(__snake_case , '''emission_probabilities''' )
def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : str ) -> None:
_validate_dict(_object , __snake_case , __snake_case )
for x in _object.values():
_validate_dict(__snake_case , __snake_case , __snake_case , __snake_case )
def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : str , __magic_name__ : type , __magic_name__ : bool = False ) -> None:
if not isinstance(_object , __snake_case ):
lowercase : str =f'''{var_name} must be a dict'''
raise ValueError(__snake_case )
if not all(isinstance(__snake_case , __snake_case ) for x in _object ):
lowercase : List[Any] =f'''{var_name} all keys must be strings'''
raise ValueError(__snake_case )
if not all(isinstance(__snake_case , __snake_case ) for x in _object.values() ):
lowercase : Optional[int] ="nested dictionary " if nested else ""
lowercase : int =f'''{var_name} {nested_text}all values must be {value_type.__name__}'''
raise ValueError(__snake_case )
if __name__ == "__main__":
from doctest import testmod
testmod()
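    # A minimal HMM (the classic healthy/fever example; the concrete numbers
    # are illustrative, not from the source) to sanity-check the decoder above
    # by brute force over all state paths:
    from itertools import product as state_product

    observations = ["normal", "cold", "dizzy"]
    states = ["Healthy", "Fever"]
    start_p = {"Healthy": 0.6, "Fever": 0.4}
    trans_p = {
        "Healthy": {"Healthy": 0.7, "Fever": 0.3},
        "Fever": {"Healthy": 0.4, "Fever": 0.6},
    }
    emit_p = {
        "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }

    def path_probability(path):
        p = start_p[path[0]] * emit_p[path[0]][observations[0]]
        for prev, cur, obs in zip(path, path[1:], observations[1:]):
            p *= trans_p[prev][cur] * emit_p[cur][obs]
        return p

    best_path = max(state_product(states, repeat=len(observations)), key=path_probability)
    assert list(best_path) == ["Healthy", "Healthy", "Fever"]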
| 713 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = BioGptTokenizer
lowerCamelCase_ = False
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase : List[str] =[
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
lowercase : Any =dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
lowercase : Union[str, Any] =['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
lowercase : Optional[int] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase : Union[str, Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(UpperCAmelCase__ ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(UpperCAmelCase__ ) )
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase : Dict ='''lower newer'''
lowercase : str ='''lower newer'''
return input_text, output_text
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : List[Any] =BioGptTokenizer(self.vocab_file , self.merges_file )
lowercase : Any ='''lower'''
lowercase : int =['''low''', '''er</w>''']
lowercase : Optional[Any] =tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Optional[int] =tokens + ['''<unk>''']
lowercase : Any =[14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ )
@slow
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Dict =BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
lowercase : List[str] =tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCAmelCase__ )
lowercase : Optional[int] =tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCAmelCase__ )
lowercase : str =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ )
lowercase : Optional[Any] =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 88 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : str =StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
lowercase : Union[str, Any] =sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
sd_pipe.set_scheduler('''sample_euler''' )
lowercase : Dict ='''A painting of a squirrel eating a burger'''
lowercase : Tuple =torch.manual_seed(0 )
lowercase : int =sd_pipe([prompt] , generator=__A , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
lowercase : Dict =output.images
lowercase : Optional[int] =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase : Optional[Any] =np.array([0.04_47, 0.04_92, 0.04_68, 0.04_08, 0.03_83, 0.04_08, 0.03_54, 0.03_80, 0.03_39] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : Optional[Any] =StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
lowercase : Tuple =sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
sd_pipe.set_scheduler('''sample_euler''' )
lowercase : List[str] ='''A painting of a squirrel eating a burger'''
lowercase : Optional[Any] =torch.manual_seed(0 )
lowercase : str =sd_pipe([prompt] , generator=__A , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
lowercase : Dict =output.images
lowercase : List[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase : List[Any] =np.array([0.12_37, 0.13_20, 0.14_38, 0.13_59, 0.13_90, 0.11_32, 0.12_77, 0.11_75, 0.11_12] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Any =StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
lowercase : int =sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
sd_pipe.set_scheduler('''sample_dpmpp_2m''' )
lowercase : Tuple ='''A painting of a squirrel eating a burger'''
lowercase : Dict =torch.manual_seed(0 )
lowercase : List[Any] =sd_pipe(
[prompt] , generator=__A , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=__A , )
lowercase : Optional[int] =output.images
lowercase : List[str] =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase : Optional[int] =np.array(
[0.11_38_16_89, 0.12_11_29_21, 0.1_38_94_57, 0.12_54_96_06, 0.1_24_49_64, 0.10_83_15_17, 0.11_56_28_66, 0.10_86_78_16, 0.10_49_90_48] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 714 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str=13 , UpperCAmelCase__ : Optional[Any]=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Dict=99 , UpperCAmelCase__ : str=32 , UpperCAmelCase__ : Optional[Any]=5 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : str="gelu" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Optional[Any]=512 , UpperCAmelCase__ : Any=16 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : int=4 , ):
'''simple docstring'''
lowercase : int =parent
lowercase : List[str] =batch_size
lowercase : str =seq_length
lowercase : Optional[Any] =is_training
lowercase : Union[str, Any] =use_attention_mask
lowercase : Optional[Any] =use_token_type_ids
lowercase : Tuple =use_labels
lowercase : List[str] =vocab_size
lowercase : List[str] =hidden_size
lowercase : Tuple =num_hidden_layers
lowercase : Any =num_attention_heads
lowercase : List[str] =intermediate_size
lowercase : Optional[Any] =hidden_act
lowercase : Dict =hidden_dropout_prob
lowercase : List[Any] =attention_probs_dropout_prob
lowercase : Optional[Any] =max_position_embeddings
lowercase : Tuple =type_vocab_size
lowercase : Optional[int] =type_sequence_label_size
lowercase : Optional[Any] =initializer_range
lowercase : Optional[int] =num_choices
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Union[str, Any] =None
if self.use_attention_mask:
lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Tuple =None
if self.use_token_type_ids:
lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : int =RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : List[Any] =self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase : str =config_and_inputs
lowercase : Optional[Any] ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowercase : List[str] =self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase : Any =config_and_inputs
lowercase : List[str] =True
lowercase : Optional[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase : str =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
lowerCamelCase_ = True
lowerCamelCase_ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : str =FlaxRobertaModelTester(self )
@slow
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowercase : Optional[int] =model_class_name.from_pretrained('''roberta-base''' , from_pt=UpperCAmelCase__ )
lowercase : List[Any] =model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase__ )
| 88 | 0 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
UpperCamelCase_ = threading.Lock()
UpperCamelCase_ = None
UpperCamelCase_ = {
"""debug""": logging.DEBUG,
"""info""": logging.INFO,
"""warning""": logging.WARNING,
"""error""": logging.ERROR,
"""critical""": logging.CRITICAL,
}
UpperCamelCase_ = logging.WARNING
UpperCamelCase_ = True
def _lowerCAmelCase ( ) -> Any:
lowercase : Tuple =os.getenv('''TRANSFORMERS_VERBOSITY''' , UpperCamelCase__ )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f'''Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '''
f'''has to be one of: { ", ".join(log_levels.keys() ) }''' )
return _default_log_level
def _lowerCAmelCase ( ) -> int:
return __name__.split('''.''' )[0]
def _lowerCAmelCase ( ) -> Optional[int]:
return logging.getLogger(_get_library_name() )
def _lowerCAmelCase ( ) -> Union[str, Any]:
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
lowercase : Optional[int] =logging.StreamHandler() # Set sys.stderr as stream.
lowercase : Any =sys.stderr.flush
# Apply our default configuration to the library root logger.
lowercase : str =_get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
lowercase : List[Any] =False
def _lowerCAmelCase ( ) -> Union[str, Any]:
global _default_handler
with _lock:
if not _default_handler:
return
lowercase : Optional[int] =_get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
lowercase : int =None
def _lowerCAmelCase ( ) -> List[Any]:
return log_levels
def _lowerCAmelCase ( __magic_name__ : Optional[str] = None ) -> Optional[int]:
if name is None:
lowercase : int =_get_library_name()
_configure_library_root_logger()
return logging.getLogger(UpperCamelCase__ )
def _lowerCAmelCase ( ) -> List[str]:
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def _lowerCAmelCase ( __magic_name__ : int ) -> Union[str, Any]:
_configure_library_root_logger()
_get_library_root_logger().setLevel(UpperCamelCase__ )
def _lowerCAmelCase ( ) -> Optional[int]:
return set_verbosity(UpperCamelCase__ )
def _lowerCAmelCase ( ) -> List[Any]:
return set_verbosity(UpperCamelCase__ )
def _lowerCAmelCase ( ) -> Optional[int]:
return set_verbosity(UpperCamelCase__ )
def _lowerCAmelCase ( ) -> Union[str, Any]:
return set_verbosity(UpperCamelCase__ )
def _lowerCAmelCase ( ) -> Optional[int]:
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def _lowerCAmelCase ( ) -> List[str]:
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def _lowerCAmelCase ( __magic_name__ : logging.Handler ) -> List[Any]:
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(UpperCamelCase__ )
def _lowerCAmelCase ( __magic_name__ : logging.Handler ) -> Optional[Any]:
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(UpperCamelCase__ )
def _lowerCAmelCase ( ) -> Tuple:
_configure_library_root_logger()
lowercase : Any =False
def _lowerCAmelCase ( ) -> Dict:
_configure_library_root_logger()
lowercase : Any =True
def _lowerCAmelCase ( ) -> str:
lowercase : Optional[Any] =_get_library_root_logger().handlers
for handler in handlers:
lowercase : Optional[int] =logging.Formatter('''[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s''' )
handler.setFormatter(UpperCamelCase__ )
def _lowerCAmelCase ( ) -> Union[str, Any]:
lowercase : List[str] =_get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(UpperCamelCase__ )
def _lowerCAmelCase ( self : str , *__magic_name__ : Any , **__magic_name__ : Dict ) -> str:
lowercase : List[str] =os.getenv('''TRANSFORMERS_NO_ADVISORY_WARNINGS''' , UpperCamelCase__ )
if no_advisory_warnings:
return
self.warning(*UpperCamelCase__ , **UpperCamelCase__ )
UpperCamelCase_ = warning_advice
@functools.lru_cache(UpperCamelCase__ )
def _lowerCAmelCase ( self : Dict , *__magic_name__ : str , **__magic_name__ : Optional[int] ) -> int:
self.warning(*UpperCamelCase__ , **UpperCamelCase__ )
UpperCamelCase_ = warning_once
class __SCREAMING_SNAKE_CASE :
def __init__( self : List[str] , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : Tuple ): # pylint: disable=unused-argument
'''simple docstring'''
lowercase : Dict =args[0] if args else None
def __iter__( self : Any ):
'''simple docstring'''
return iter(self._iterator )
def __getattr__( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
def empty_fn(*UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : Optional[Any] ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : Any ):
'''simple docstring'''
return self
def __exit__( self : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str ):
'''simple docstring'''
return
class __SCREAMING_SNAKE_CASE :
def __call__( self : List[Any] , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : Any ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm(*__A , **__A )
else:
return EmptyTqdm(*__A , **__A )
def lowerCamelCase_ ( self : Tuple , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : str ):
'''simple docstring'''
lowercase : Any =None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*__A , **__A )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
UpperCamelCase_ = _tqdm_cls()
def _lowerCAmelCase ( ) -> str:
global _tqdm_active
return bool(_tqdm_active )
def _lowerCAmelCase ( ) -> str:
global _tqdm_active
lowercase : Dict =True
hf_hub_utils.enable_progress_bars()
def _lowerCAmelCase ( ) -> Dict:
global _tqdm_active
lowercase : int =False
hf_hub_utils.disable_progress_bars()
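if __name__ == "__main__":
    # Typical usage of the public API this module implements, assuming it
    # mirrors transformers.utils.logging:
    from transformers.utils import logging as hf_logging

    hf_logging.set_verbosity_info()
    demo_logger = hf_logging.get_logger("transformers")
    demo_logger.info("verbosity set to INFO")
    demo_logger.warning_advice("suppressed when TRANSFORMERS_NO_ADVISORY_WARNINGS is set")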
| 715 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Optional[Any]=None ):
'''simple docstring'''
# Input as list
lowercase : Optional[int] =list(poly_a or [0] )[:]
lowercase : Optional[Any] =list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
lowercase : Any =len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
lowercase : Dict =len(self.polyB )
# Add 0 to make lengths equal a power of 2
lowercase : int =int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
lowercase : Union[str, Any] =complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
lowercase : Tuple =self.__multiply()
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Tuple ):
'''simple docstring'''
lowercase : Union[str, Any] =[[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB]
# Corner case
if len(UpperCAmelCase__ ) <= 1:
return dft[0]
#
lowercase : Any =self.c_max_length // 2
while next_ncol > 0:
lowercase : Optional[int] =[[] for i in range(UpperCAmelCase__ )]
lowercase : Tuple =self.root**next_ncol
# First half of next step
lowercase : str =1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCAmelCase__ ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
lowercase : int =1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCAmelCase__ ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
lowercase : Dict =new_dft
lowercase : Tuple =next_ncol // 2
return dft[0]
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Any =self.__dft('''A''' )
lowercase : Any =self.__dft('''B''' )
lowercase : Optional[int] =[[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
lowercase : Optional[int] =2
while next_ncol <= self.c_max_length:
lowercase : Optional[int] =[[] for i in range(UpperCAmelCase__ )]
lowercase : List[str] =self.root ** (next_ncol // 2)
lowercase : Optional[int] =1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
lowercase : List[Any] =new_inverse_c
next_ncol *= 2
# Unpack
lowercase : Tuple =[round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Any ):
'''simple docstring'''
lowercase : Any ='''A = ''' + ''' + '''.join(
F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) )
lowercase : Tuple ='''B = ''' + ''' + '''.join(
F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) )
lowercase : List[str] ='''A*B = ''' + ''' + '''.join(
F'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) )
return F'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
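    # Independent check of the product the __multiply() routine computes: it
    # is an ordinary polynomial convolution, e.g. (1 + 2x)(3 + 4x) = 3 + 10x + 8x^2.
    assert list(np.convolve([1, 2], [3, 4])) == [3, 10, 8]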
| 88 | 0 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def _lowerCAmelCase ( __magic_name__ : Tuple ) -> Union[str, Any]:
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def _lowerCAmelCase ( ) -> str:
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def _lowerCAmelCase ( ) -> str:
lowercase : List[str] ='''mock-s3-bucket'''
lowercase : str =f'''s3://{mock_bucket}'''
lowercase : List[Any] =extract_path_from_uri(_UpperCAmelCase )
assert dataset_path.startswith('''s3://''' ) is False
lowercase : Any ='''./local/path'''
lowercase : int =extract_path_from_uri(_UpperCAmelCase )
assert dataset_path == new_dataset_path
def _lowerCAmelCase ( __magic_name__ : Optional[int] ) -> Any:
lowercase : List[str] =is_remote_filesystem(_UpperCAmelCase )
assert is_remote is True
lowercase : str =fsspec.filesystem('''file''' )
lowercase : str =is_remote_filesystem(_UpperCAmelCase )
assert is_remote is False
@pytest.mark.parametrize('''compression_fs_class''' , _UpperCAmelCase )
def _lowerCAmelCase ( __magic_name__ : Dict , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : Optional[int] , __magic_name__ : Any , __magic_name__ : Tuple ) -> Union[str, Any]:
lowercase : Tuple ={'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file}
lowercase : List[Any] =input_paths[compression_fs_class.protocol]
if input_path is None:
lowercase : List[Any] =f'''for \'{compression_fs_class.protocol}\' compression protocol, '''
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(_UpperCAmelCase )
lowercase : Optional[Any] =fsspec.filesystem(compression_fs_class.protocol , fo=_UpperCAmelCase )
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
lowercase : Optional[int] =os.path.basename(_UpperCAmelCase )
lowercase : int =expected_filename[: expected_filename.rindex('''.''' )]
assert fs.glob('''*''' ) == [expected_filename]
with fs.open(_UpperCAmelCase , '''r''' , encoding='''utf-8''' ) as f, open(_UpperCAmelCase , encoding='''utf-8''' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] )
def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Any ) -> Optional[int]:
lowercase : List[Any] ={'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path}
lowercase : str =compressed_file_paths[protocol]
lowercase : Dict ='''dataset.jsonl'''
lowercase : int =f'''{protocol}://{member_file_path}::{compressed_file_path}'''
lowercase , *lowercase : Any =fsspec.get_fs_token_paths(_UpperCAmelCase )
assert fs.isfile(_UpperCAmelCase )
assert not fs.isfile('''non_existing_''' + member_file_path )
@pytest.mark.integration
def _lowerCAmelCase ( __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : Optional[int] , __magic_name__ : List[str] ) -> Any:
lowercase : Union[str, Any] =hf_api.dataset_info(_UpperCAmelCase , token=_UpperCAmelCase )
lowercase : str =HfFileSystem(repo_info=_UpperCAmelCase , token=_UpperCAmelCase )
assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"]
assert hffs.isdir('''data''' )
assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' )
with open(_UpperCAmelCase ) as f:
assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read()
def _lowerCAmelCase ( ) -> Optional[Any]:
lowercase : Any ='''bz2'''
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(_UpperCAmelCase , _UpperCAmelCase , clobber=_UpperCAmelCase )
with pytest.warns(_UpperCAmelCase ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(_UpperCAmelCase ) == 1
assert (
str(warning_info[0].message )
== f'''A filesystem protocol was already set for {protocol} and will be overwritten.'''
)
| 716 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {"""configuration_plbart""": ["""PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PLBartConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""PLBartTokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""PLBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PLBartForCausalLM""",
"""PLBartForConditionalGeneration""",
"""PLBartForSequenceClassification""",
"""PLBartModel""",
"""PLBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
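# With the lazy structure above, public names resolve on first attribute
# access; e.g. (torch and sentencepiece are needed at runtime):
#
#     from transformers import PLBartConfig, PLBartForConditionalGeneration, PLBartTokenizer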
| 88 | 0 |