| code (string, lengths 82-53.2k) | code_codestyle (int64, 0-721) | style_context (string, lengths 91-41.9k) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
import torch
from diffusers import StableDiffusionPipeline
model_id = 'path-to-your-trained-model'
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to('cuda')
prompt = 'A photo of sks dog in a bucket'
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('dog-bucket.png')
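# Hedged variant of the snippet above (added for illustration, not part of the
# original row): float16 weights need a GPU, so fall back to float32 on
# CPU-only machines. The model path is the same placeholder as above.
import torch
from diffusers import StableDiffusionPipeline
device = 'cuda' if torch.cuda.is_available() else 'cpu'
dtype = torch.float16 if device == 'cuda' else torch.float32
pipe = StableDiffusionPipeline.from_pretrained('path-to-your-trained-model', torch_dtype=dtype).to(device)
image = pipe('A photo of sks dog in a bucket', num_inference_steps=50, guidance_scale=7.5).images[0]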
| 53 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)
    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])
    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))
    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
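# Illustrative aside (added; not part of the original test file): the padding
# behaviour the global-attention test above asserts, as a runnable sketch.
# Assumes access to the Hugging Face Hub.
def _demo_led_global_attention_mask():
    tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    enc = tokenizer(["Summary of the text.", "Another summary."], padding=False)
    enc["global_attention_mask"] = [[0] * len(ids) for ids in enc["input_ids"]]
    padded = tokenizer.pad(enc)
    return padded["global_attention_mask"]  # the shorter entry is padded with -1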
| 130 | 0 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config, "use_cache"):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)
                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]
                self.assertEqual(len(outputs), num_out)
                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )
                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )
                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
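# Illustrative aside (added; not part of the original test file): a minimal
# masked-LM sketch for the same checkpoint the integration test below
# exercises. Assumes hub access; the prompt and helper name are hypothetical.
def _demo_convbert_fill_mask():
    from transformers import AutoTokenizer, TFConvBertForMaskedLM

    tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
    model = TFConvBertForMaskedLM.from_pretrained("YituTech/conv-bert-base")
    inputs = tokenizer(f"The capital of France is {tokenizer.mask_token}.", return_tensors="tf")
    logits = model(**inputs).logits
    # locate the [MASK] position and decode the highest-scoring vocabulary id
    mask_pos = int(tf.where(inputs["input_ids"][0] == tokenizer.mask_token_id)[0][0])
    return tokenizer.decode([int(tf.argmax(logits[0, mask_pos]))])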
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 721 |
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]

if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
) | 344 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 484 |
def __lowerCAmelCase(__magic_name__):
    """Return the largest number obtainable by removing exactly one digit
    from the absolute value of the input integer."""
    if not isinstance(__magic_name__, int):
        raise TypeError("only integers accepted as input")
    num_string = str(abs(__magic_name__))
    num_transpositions = [list(num_string) for _ in range(len(num_string))]
    for index in range(len(num_string)):
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)


if __name__ == "__main__":
    __import__("doctest").testmod()
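# Worked example (added for illustration): for 152 the one-digit removals give
# 52, 12 and 15, so the maximum is 52.
assert __lowerCAmelCase(152) == 52
assert __lowerCAmelCase(-100) == 10  # the sign is dropped by abs()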
| 226 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1000,
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))
        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        source_prompt = "A black colored car"
        prompt = "A blue colored car"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images
        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1
    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))
        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        source_prompt = "A black colored car"
        prompt = "A blue colored car"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images
        assert np.abs(image - expected_image).max() < 2e-2
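# Condensed usage sketch (added; mirrors the integration tests above, assumes
# hub access and a CUDA device; the helper name is hypothetical).
def run_cycle_diffusion_demo(init_image):
    scheduler = DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
    pipe = CycleDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", scheduler=scheduler).to("cuda")
    return pipe(
        prompt="A blue colored car",
        source_prompt="A black colored car",
        image=init_image.resize((512, 512)),
        num_inference_steps=100,
        eta=0.1,
        strength=0.85,
        guidance_scale=3,
        source_guidance_scale=1,
    ).images[0]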
| 424 |
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"
class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)
    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
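# Quick illustration (added for clarity) of the class above: reverse() turns
# 1->2->3 into 3->2->1 in place.
def _demo_reverse() -> str:
    demo = LinkedList()
    for value in (1, 2, 3):
        demo.insert_tail(value)
    demo.reverse()
    return str(demo)  # "3->2->1"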
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
| 424 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 481 |
import tempfile
import unittest

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
    is_torch_available,
    require_optimum,
    require_torch,
    slow,
)

if is_torch_available():
    import torch


@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        # check that the conversion to/from BetterTransformer round-trips
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)

            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        # save_pretrained must fail while the model is in BetterTransformer mode
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
| 481 | 1 |
from __future__ import annotations

import json

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    def __init__(self, username: str):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"
    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    instagram_user = InstagramUser("github")
    print(instagram_user)
    print(f"{instagram_user.number_of_posts = }")
    print(f"{instagram_user.number_of_followers = }")
    print(f"{instagram_user.number_of_followings = }")
    print(f"{instagram_user.email = }")
    print(f"{instagram_user.website = }")
    print(f"{instagram_user.profile_picture_url = }")
    print(f"{instagram_user.is_verified = }")
    print(f"{instagram_user.is_private = }")
| 703 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    model_type = "canine"
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
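# Minimal usage sketch (added for illustration): instantiate the config with
# defaults and override one of the character-hashing parameters.
config = CanineConfig(num_hash_functions=4)
print(config.num_hash_buckets, config.downsampling_rate)  # 16384 4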
| 206 | 0 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
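
# --- Usage sketch (an addition, not part of the original module) ---
# Demonstrates the CONNECTION_FAILS mode defined above: any request sent through
# a requests.Session raises immediately instead of touching the network.
def _example_offline_connection_fails():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        try:
            requests.Session().get("https://huggingface.co")
        except requests.ConnectionError as e:
            print("offline mode raised:", e)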
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
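
# Usage sketch (an addition): decorating a network-dependent test turns transient
# 500/502 server errors into expected failures; the test name below is made up.
#
# @xfail_if_500_502_http_error
# def test_fetch_remote_index():
#     ...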
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False):
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True):
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result
def pytest_xdist_worker_id():
    """Return the numerical id of the pytest-xdist worker ("gw0" -> 0), or 0 when not running under xdist."""
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """Return a port unique to this xdist worker so parallel distributed tests don't collide."""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
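
# Worked example (an addition): under `pytest -n`, worker "gw3" yields id 3 and
# port 29503; without xdist the default "gw0" maps to 29500.
def _example_unique_port():
    os.environ["PYTEST_XDIST_WORKER"] = "gw3"
    assert pytest_xdist_worker_id() == 3
    assert get_torch_dist_unique_port() == 29503
    del os.environ["PYTEST_XDIST_WORKER"]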
| 658 |
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
_DESCRIPTION = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year's competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
_KWARGS_DESCRIPTION = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class COMET(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://unbabel.github.io/COMET/html/index.html" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"sources": datasets.Value("string" , id="sequence" ),
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/Unbabel/COMET"] , reference_urls=[
"https://github.com/Unbabel/COMET",
"https://www.aclweb.org/anthology/2020.emnlp-main.213/",
"http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
] , )
    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))
    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 568 | 0 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
def UpperCamelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
pass # TODO add if relevant
def UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
pass # TODO add if relevant
def UpperCamelCase ( self : List[Any] ) -> str:
"""simple docstring"""
pass # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
def UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
A_ = MecabTokenizer(mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def UpperCamelCase ( self : int ) -> int:
"""simple docstring"""
try:
A_ = MecabTokenizer(mecab_dic='''unidic_lite''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def UpperCamelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
try:
A_ = MecabTokenizer(mecab_dic='''unidic''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def UpperCamelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
A_ = MecabTokenizer(do_lower_case=lowerCamelCase__ , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def UpperCamelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
try:
A_ = MecabTokenizer(
do_lower_case=lowerCamelCase__ , normalize_text=lowerCamelCase__ , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
def UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
A_ = MecabTokenizer(normalize_text=lowerCamelCase__ , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , )
@require_sudachi
def UpperCamelCase ( self : int ) -> str:
"""simple docstring"""
A_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' )
self.assertIsNotNone(lowerCamelCase__ )
A_ = '''こんにちは、世界。\nこんばんは、世界。'''
A_ = tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
A_ = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(lowerCamelCase__ , '''wb''' ) as handle:
pickle.dump(lowerCamelCase__ , lowerCamelCase__ )
with open(lowerCamelCase__ , '''rb''' ) as handle:
A_ = pickle.load(lowerCamelCase__ )
A_ = tokenizer_new.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
@require_sudachi
def UpperCamelCase ( self : str ) -> int:
"""simple docstring"""
A_ = SudachiTokenizer(sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def UpperCamelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
A_ = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] )
@require_sudachi
def UpperCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
A_ = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] )
@require_sudachi
def UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
A_ = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] )
@require_sudachi
def UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
A_ = SudachiTokenizer(do_lower_case=lowerCamelCase__ , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
A_ = SudachiTokenizer(normalize_text=lowerCamelCase__ , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
A_ = SudachiTokenizer(trim_whitespace=lowerCamelCase__ , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
@require_jumanpp
def UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
A_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' )
self.assertIsNotNone(lowerCamelCase__ )
A_ = '''こんにちは、世界。\nこんばんは、世界。'''
A_ = tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
A_ = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(lowerCamelCase__ , '''wb''' ) as handle:
pickle.dump(lowerCamelCase__ , lowerCamelCase__ )
with open(lowerCamelCase__ , '''rb''' ) as handle:
A_ = pickle.load(lowerCamelCase__ )
A_ = tokenizer_new.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
@require_jumanpp
def UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
A_ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
A_ = JumanppTokenizer(do_lower_case=lowerCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def UpperCamelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
A_ = JumanppTokenizer(normalize_text=lowerCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def UpperCamelCase ( self : int ) -> int:
"""simple docstring"""
A_ = JumanppTokenizer(trim_whitespace=lowerCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , )
@require_jumanpp
def UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
A_ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , )
def UpperCamelCase ( self : List[str] ) -> Any:
"""simple docstring"""
A_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''']
A_ = {}
for i, token in enumerate(lowerCamelCase__ ):
A_ = i
A_ = WordpieceTokenizer(vocab=lowerCamelCase__ , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] )
def UpperCamelCase ( self : int ) -> str:
"""simple docstring"""
A_ = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' )
A_ = tokenizer.subword_tokenizer
A_ = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' )
self.assertListEqual(lowerCamelCase__ , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] )
A_ = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' )
self.assertListEqual(lowerCamelCase__ , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] )
def UpperCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
A_ = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' )
A_ = tokenizer.encode('''ありがとう。''' , add_special_tokens=lowerCamelCase__ )
A_ = tokenizer.encode('''どういたしまして。''' , add_special_tokens=lowerCamelCase__ )
A_ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ )
A_ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ , lowerCamelCase__ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _lowercase ( __lowerCamelCase,unittest.TestCase ):
_lowercase : Optional[int] = BertJapaneseTokenizer
_lowercase : int = False
def UpperCamelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
super().setUp()
A_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def UpperCamelCase ( self : List[Any] , **lowerCamelCase__ : str ) -> str:
"""simple docstring"""
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **lowerCamelCase__ )
def UpperCamelCase ( self : Union[str, Any] , lowerCamelCase__ : str ) -> Optional[int]:
"""simple docstring"""
A_ = '''こんにちは、世界。 \nこんばんは、世界。'''
A_ = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'''
return input_text, output_text
def UpperCamelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
pass # TODO add if relevant
def UpperCamelCase ( self : int ) -> str:
"""simple docstring"""
pass # TODO add if relevant
def UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
pass # TODO add if relevant
def UpperCamelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
A_ = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' )
A_ = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' )
self.assertListEqual(
lowerCamelCase__ , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2] )
def UpperCamelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
A_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
A_ = {}
for i, token in enumerate(lowerCamelCase__ ):
A_ = i
A_ = CharacterTokenizer(vocab=lowerCamelCase__ , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] )
self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] )
def UpperCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
A_ = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' )
A_ = tokenizer.encode('''ありがとう。''' , add_special_tokens=lowerCamelCase__ )
A_ = tokenizer.encode('''どういたしまして。''' , add_special_tokens=lowerCamelCase__ )
A_ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ )
A_ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ , lowerCamelCase__ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _lowercase ( unittest.TestCase ):
def UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
A_ = '''cl-tohoku/bert-base-japanese'''
A_ = AutoTokenizer.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
class _lowercase ( unittest.TestCase ):
def UpperCamelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
A_ = '''cl-tohoku/bert-base-japanese'''
with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
BertTokenizer.from_pretrained(lowerCamelCase__ )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
A_ = '''bert-base-cased'''
with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
BertJapaneseTokenizer.from_pretrained(lowerCamelCase__ )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
| 563 |
def binary_multiply(a: int, b: int) -> int:
    """Russian-peasant (binary) multiplication: add shifted copies of `a` for each set bit of `b`."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """Same doubling scheme, but each addition is reduced modulo `modulus` so intermediates stay small."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus
        a += a
        b >>= 1
    return res
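
# Worked example (an addition): binary_multiply(13, 11) scans the bits of 11
# (0b1011): it adds 13 (bit 0), adds 26 (bit 1), skips 52 (bit 2), and adds
# 104 (bit 3), giving 143 = 13 * 11.
if __name__ == "__main__":
    assert binary_multiply(13, 11) == 143
    assert binary_mod_multiply(13, 11, 7) == (13 * 11) % 7  # 143 % 7 == 3
    print("binary multiplication checks passed")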
| 563 | 1 |
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any):
        # insert at the head of the list
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return

        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next

        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next

        if node_1 is None or node_2 is None:
            return

        # swap only the data fields; the links stay untouched
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)

    ll.print_list()

    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
| 195 |
from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first `n` lines of each file under `src_dir` to a same-named file under `dest_dir`."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
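
# Example invocation (an addition; the paths are made up). python-fire exposes the
# function signature directly on the command line:
#
#     python minify.py ./raw_data ./minified_data 100
#
# which keeps only the first 100 lines of every file in ./raw_data. The same
# signature also works programmatically: minify("./raw_data", "./minified_data", 100).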
| 195 | 1 |
"""simple docstring"""
def _snake_case ( lowercase__ : list ) -> list:
'''simple docstring'''
if len(lowercase__ ) <= 1:
return lst
lowerCAmelCase_ :Optional[int] = 1
while i < len(lowercase__ ):
if lst[i - 1] <= lst[i]:
i += 1
else:
lowerCAmelCase_ :int = lst[i], lst[i - 1]
i -= 1
if i == 0:
lowerCAmelCase_ :List[str] = 1
return lst
if __name__ == "__main__":
__UpperCAmelCase = input('Enter numbers separated by a comma:\n').strip()
__UpperCAmelCase = [int(item) for item in user_input.split(',')]
print(gnome_sort(unsorted))
| 709 |
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNetaDConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True):
        super().__init__()

        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=requires_safety_checker,
        )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)
    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def text2img_sd1_1(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
    @torch.no_grad()
    def __call__(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
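
# Usage sketch (an addition): loading the class above as a diffusers community
# pipeline. The custom_pipeline id is an assumption, and the call is left
# commented out because it downloads all four v1.x checkpoints.
#
# pipe = DiffusionPipeline.from_pretrained(pipe4_model_id, custom_pipeline="stable_diffusion_comparison")
# pipe.enable_attention_slicing()
# images = pipe(prompt="an astronaut riding a horse").images  # one image per checkpoint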
| 256 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = [[1, 2, 4], [1, 2, 3, 4]]
_lowerCAmelCase : Optional[int] = DisjunctiveConstraint(__UpperCamelCase)
self.assertTrue(isinstance(dc.token_ids, __UpperCamelCase))
with self.assertRaises(__UpperCamelCase):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))
with self.assertRaises(__UpperCamelCase):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__UpperCamelCase):
DisjunctiveConstraint(__UpperCamelCase) # fails here
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = [[1, 2, 3], [1, 2, 4]]
_lowerCAmelCase : List[str] = DisjunctiveConstraint(__UpperCamelCase)
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = dc.update(1)
_lowerCAmelCase : List[Any] = stepped is True and completed is False and reset is False
self.assertTrue(__UpperCamelCase)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1])
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = dc.update(2)
_lowerCAmelCase : List[str] = stepped is True and completed is False and reset is False
self.assertTrue(__UpperCamelCase)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2])
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = dc.update(3)
_lowerCAmelCase : Dict = stepped is True and completed is True and reset is False
self.assertTrue(__UpperCamelCase)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
_lowerCAmelCase : Tuple = DisjunctiveConstraint(__UpperCamelCase)
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = dc.update(1)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1])
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[str] = dc.update(2)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2])
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = dc.update(4)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2, 4])
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = dc.update(5)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5])
dc.reset()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = dc.update(1)
self.assertTrue(not dc.completed)
self.assertTrue(dc.remaining() == 3)
self.assertTrue(dc.current_seq == [1])
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = dc.update(2)
self.assertTrue(not dc.completed)
self.assertTrue(dc.remaining() == 2)
self.assertTrue(dc.current_seq == [1, 2])
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = dc.update(5)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.remaining() == 0)
self.assertTrue(dc.current_seq == [1, 2, 5])
| 500 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class _UpperCamelCase (unittest.TestCase ):
def __UpperCAmelCase ( self )-> List[Any]:
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
__lowerCAmelCase = [[1, 2, 4], [1, 2, 3, 4]]
__lowerCAmelCase = DisjunctiveConstraint(__UpperCamelCase )
self.assertTrue(isinstance(dc.token_ids , __UpperCamelCase ) )
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __UpperCAmelCase ( self )-> Tuple:
# We can't have constraints that are complete subsets of another. This leads to a preverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
__lowerCAmelCase = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint(__UpperCamelCase ) # fails here
def __UpperCAmelCase ( self )-> List[Any]:
__lowerCAmelCase = [[1, 2, 3], [1, 2, 4]]
__lowerCAmelCase = DisjunctiveConstraint(__UpperCamelCase )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(1 )
__lowerCAmelCase = stepped is True and completed is False and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(2 )
__lowerCAmelCase = stepped is True and completed is False and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(3 )
__lowerCAmelCase = stepped is True and completed is True and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __UpperCAmelCase ( self )-> int:
__lowerCAmelCase = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
__lowerCAmelCase = DisjunctiveConstraint(__UpperCamelCase )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
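
# Context sketch (an addition): outside these unit tests, DisjunctiveConstraint is
# normally built from tokenized phrase alternatives and handed to `generate`; the
# checkpoint below is arbitrary and the snippet is left commented out.
#
# from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
#
# tok = AutoTokenizer.from_pretrained("t5-small")
# model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
# phrases = [tok("rained", add_special_tokens=False).input_ids, tok("snowed", add_special_tokens=False).input_ids]
# out = model.generate(**tok("the weather", return_tensors="pt"), constraints=[DisjunctiveConstraint(phrases)], num_beams=4)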
| 367 | 0 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree):
    """Recursively collect the shapes of all tensors in a nested structure."""
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx, dims):
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
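
# Worked example (an addition): for dims = (2, 3), flat index 4 unrolls row-major
# into (1, 1): 4 % 3 = 1 fills the last dimension, then 4 // 3 = 1 the first, so
# _flat_idx_to_idx(4, (2, 3)) == (1, 1).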
@torch.jit.ignore
def _get_minimal_slice_set(start, end, dims, start_edges=None, end_edges=None):
    """
    Produce an ordered sequence of slice tuples that together cover exactly the
    (inclusive) window between the multi-dimensional indices `start` and `end`.
    """

    def reduce_edge_list(l) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)
    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(start):
        return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
        middle_ground = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def _chunk_slice(t, flat_start, flat_end, no_batch_dims):
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(start_idx, end_idx, batch_dims)

    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(layer, inputs, chunk_size, no_batch_dims, low_mem=False, _out=None, _add_into_out=False):
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t):
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t):
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice, flat_start=i, flat_end=min(flat_batch_dim, i + chunk_size), no_batch_dims=len(orig_batch_dims),
            )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1, d2) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
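
# Usage sketch (an addition): chunking a toy layer over a batch of 12 in chunks of
# 4. The layer adds its two inputs; the chunked result equals the unchunked one.
def _example_chunk_layer():
    layer = lambda x, y: {"sum": x + y}
    inputs = {"x": torch.ones(12, 5), "y": torch.ones(12, 5)}
    out = chunk_layer(layer, inputs, chunk_size=4, no_batch_dims=1)
    assert torch.equal(out["sum"], torch.full((12, 5), 2.0))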
class ChunkSizeTuner:
    def __init__(self, max_chunk_size=512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None
    def _determine_favorable_chunk_size(self, fn, args, min_chunk_size):
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]
    def _compare_arg_caches(self, ac1, ac2) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent
    def tune_chunk_size(self, representative_fn, args, min_chunk_size) -> int:
        consistent = True
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(representative_fn, args, min_chunk_size)
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
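
# Usage sketch (an addition): ChunkSizeTuner binary-searches the largest
# power-of-two chunk size at which `fn(*args, chunk_size=...)` still runs without
# a RuntimeError, then caches it keyed on the argument shapes. The names below
# are illustrative.
#
# tuner = ChunkSizeTuner(max_chunk_size=512)
# chunk_size = tuner.tune_chunk_size(forward_fn, args, min_chunk_size=16)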
| 715 |
from ..utils import is_flax_available, is_torch_available


if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
| 424 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = 'french fries'
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs['prompt'] = [inputs['prompt']] * 2
        image = np.array(inputs['image']).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs['image'] = image.repeat(2, 1, 1, 1)
        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]
        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_euler(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['scheduler'] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear')
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(','.join([str(x) for x in slice]))
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_batch_single_identical(self):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
    def test_latents_input(self):
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type='pt'))[0]
        vae = components['vae']
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type='pt')
        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()
        out_latents_inputs = pipe(**inputs)[0]
        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, 'passing latents as image input generate different result from passing image')
@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
"""simple docstring"""
    def tearDown(self):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg')
        inputs = {
            'prompt': 'turn him into a cyborg',
            'image': image,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 7.5,
            'image_guidance_scale': 1.0,
            'output_type': 'numpy',
        }
        return inputs
    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            'timbrooks/instruct-pix2pix', safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            'timbrooks/instruct-pix2pix', safety_checker=None)
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            'timbrooks/instruct-pix2pix', safety_checker=None)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            'timbrooks/instruct-pix2pix', safety_checker=None, torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            'timbrooks/instruct-pix2pix', safety_checker=None, torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        inputs = self.get_inputs()
        _ = pipe(**inputs)
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9
    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs['image'] = inputs['image'].resize((504, 504))
        model_id = 'timbrooks/instruct-pix2pix'
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            model_id, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        output = pipe(**inputs)
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
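# Hedged usage sketch (not part of the test suite): the two knobs these tests exercise.
# `guidance_scale` steers generation toward the text instruction, while
# `image_guidance_scale` keeps the result close to the input image. The checkpoint id
# matches the slow tests above; the prompt and scale values are illustrative.
#
#   pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
#       'timbrooks/instruct-pix2pix', safety_checker=None
#   ).to('cuda')
#   image = load_image(
#       'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg'
#   )
#   edited = pipe(
#       'turn him into a cyborg', image=image, guidance_scale=7.5, image_guidance_scale=1.5
#   ).images[0]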
| 101 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = False, False, False
@dataclass
class Audio:
    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()})
    _type: str = field(default='Audio', init=False, repr=False)
def __call__( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError('To support encoding audio data, please install \'soundfile\'.') from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value['array'], value['sampling_rate'], format='wav')
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get('path') is not None and os.path.isfile(value['path']):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith('pcm'):
                # "PCM" only has raw audio bytes
                if value.get('sampling_rate') is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError('To use PCM files, please specify a \'sampling_rate\' in Audio object')
                if value.get('bytes'):
                    # If we already had PCM-byte, we don't have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value['bytes'], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value['path'], dtype='h', mode='r').astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value['sampling_rate'], format='wav')
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get('path')}
        elif value.get('bytes') is not None or value.get('path') is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get('bytes'), "path": value.get('path')}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.")
    def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
        if not self.decode:
            raise RuntimeError('Decoding is disabled for this feature. Please use Audio(decode=True) instead.')
        path, file = (value['path'], BytesIO(value['bytes'])) if value['bytes'] is not None else (value['path'], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError('To support decoding audio files, please install \'librosa\' and \'soundfile\'.') from err
        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                'Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ')
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                'Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ')
        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split('::')[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)['repo_id']
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path, 'rb', use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)
        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate
        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        if self.decode:
            raise ValueError('Cannot flatten a decoded Audio feature.')
        return {
            "bytes": Value('binary'),
            "path": Value('string'),
        }
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ['bytes', 'path'], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ['bytes', 'path'], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices('array'):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index('bytes') >= 0:
                bytes_array = storage.field('bytes')
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index('path') >= 0:
                path_array = storage.field('path')
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ['bytes', 'path'], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, 'rb') as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x['path']) if x['bytes'] is None else x['bytes']) if x is not None else None
                for x in storage.to_pylist()
            ], type=pa.binary(), )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field('path').to_pylist()], type=pa.string(), )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ['bytes', 'path'], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
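if __name__ == "__main__":
    # Hedged round-trip sketch (illustrative; needs soundfile/librosa and only runs in an
    # environment where this module's relative imports resolve): encode a synthetic tone
    # into wav bytes with encode_example, then decode it back.
    sr = 16_000
    tone = np.sin(2 * np.pi * 440.0 * np.arange(sr) / sr).astype(np.float32)
    feature = Audio(sampling_rate=sr)
    encoded = feature.encode_example({'array': tone, 'sampling_rate': sr})
    decoded = feature.decode_example(encoded)
    print(decoded['sampling_rate'], decoded['array'].shape)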
| 375 | 0 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script.")

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
raise SystemExit(main())
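# Hedged registration sketch: how this parser is typically mounted as a subcommand of a
# larger CLI (mirrors the way `accelerate env` is wired up; names are illustrative).
#
#   root = argparse.ArgumentParser('accelerate')
#   subparsers = root.add_subparsers()
#   env_command_parser(subparsers=subparsers)
#   parsed = root.parse_args(['env'])
#   parsed.func(parsed)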
| 510 |
def manhattan_distance(point_a: list, point_b: list) -> float:
    """
    Expects two lists of numbers representing two points in the same
    n-dimensional space.

    >>> manhattan_distance([1, 1], [2, 2])
    2.0
    >>> manhattan_distance([1.5, 1.5], [2, 2])
    1.0
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """
    Same metric with a single summation expression.

    >>> manhattan_distance_one_liner([1, 1], [2, 2])
    2.0
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
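# Hedged cross-check sketch (assumes NumPy is available): the Manhattan distance is just
# the L1 norm of the coordinate differences.
#
#   import numpy as np
#   a, b = np.array([1, 1]), np.array([2, 2])
#   assert manhattan_distance([1, 1], [2, 2]) == float(np.abs(a - b).sum())  # 2.0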
| 510 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
a : List[Any] = logging.get_logger(__name__)
def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
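# Hedged usage sketch for shape_list: mixing static and dynamic dimensions lets
# downstream reshapes work during tf.function tracing (shapes here are illustrative).
#
#   x = tf.keras.Input(shape=(None, 64))   # batch and sequence dims are unknown
#   dims = shape_list(x)                   # [<dynamic>, <dynamic>, 64]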
def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError('Only 1D weight and bias tensors are supported for now, with only a single axis.')

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon, )
    return outputs
def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF

    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor, tf.cast(embed_dim, dtype=tensor.dtype), message=(
            f'The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding '
            f'layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'
        ), )
def save_attributes_to_hdf5_group(group, name, data):
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            'The following attributes cannot be saved to HDF5 file because '
            f'they are larger than {HDF5_OBJECT_HEADER_LIMIT} '
            f'bytes: {bad_attributes}')

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs['%s%d' % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode('utf8') if hasattr(n, 'decode') else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode('utf8') if hasattr(n, 'decode') else n for n in group.attrs['%s%d' % (name, chunk_id)]])
            chunk_id += 1
    return data
def expand_1d(data):
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
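# Hedged round-trip sketch for the chunked-attribute helpers above (assumes h5py is
# installed; the file name is illustrative).
#
#   import h5py
#   with h5py.File('weights.h5', 'w') as f:
#       save_attributes_to_hdf5_group(f, 'layer_names', [b'dense_1', b'dense_2'])
#       assert load_attributes_from_hdf5_group(f, 'layer_names') == ['dense_1', 'dense_2']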
| 273 |
"""simple docstring"""
from itertools import product
def __magic_name__ ( UpperCamelCase : int , UpperCamelCase : int ) -> list[int]:
a__ = sides_number
a__ = max_face_number * dice_number
a__ = [0] * (max_total + 1)
a__ = 1
a__ = range(UpperCamelCase , max_face_number + 1 )
for dice_numbers in product(UpperCamelCase , repeat=UpperCamelCase ):
a__ = sum(UpperCamelCase )
totals_frequencies[total] += 1
return totals_frequencies
def __magic_name__ ( ) -> float:
a__ = total_frequency_distribution(
sides_number=4 , dice_number=9 )
a__ = total_frequency_distribution(
sides_number=6 , dice_number=6 )
a__ = 0
a__ = 9
a__ = 4 * 9
a__ = 6
for peter_total in range(UpperCamelCase , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
a__ = (4**9) * (6**6)
a__ = peter_wins_count / total_games_number
a__ = round(UpperCamelCase , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
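# Hedged sanity-check sketch: estimate the same probability by simulation (slow, for
# intuition only; the trial count is illustrative).
#
#   import random
#   trials = 10**6
#   wins = sum(
#       sum(random.randint(1, 4) for _ in range(9)) > sum(random.randint(1, 6) for _ in range(6))
#       for _ in range(trials)
#   )
#   print(wins / trials)  # should approach solution() == 0.5731441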
| 273 | 1 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
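# Hedged invocation sketch (script file name and paths are illustrative placeholders):
#
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./lxmert_tf/model.ckpt \
#       --config_file ./lxmert_tf/config.json \
#       --pytorch_dump_path ./lxmert_pytorch/pytorch_model.bin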
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path) | 381 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)
    def get_config(self):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
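# Note: the tester above deliberately builds a tiny BlipTextConfig (hidden_size=32,
# 5 layers, vocab_size=99) so the shape checks in the test class below run in
# milliseconds on CPU; the outputs are not meant to be meaningful text features.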
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass
    @unittest.skip(reason='Blip does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING')
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING')
    def test_save_load_fast_init_to_base(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys) | 381 | 1 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a frozen linear module with a small trainable low-rank adapter."""

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False), nn.Linear(rank, module.out_features, bias=False), )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
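# Hedged usage sketch for LoRALayer: freeze a base linear layer and train only the
# low-rank adapter that rides on top of it (dimensions and rank are illustrative).
#
#   base = nn.Linear(16, 16)
#   for p in base.parameters():
#       p.requires_grad = False
#   wrapped = LoRALayer(base, rank=4)
#   out = wrapped(torch.randn(2, 16))   # frozen base output + trainable adapter output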
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    model_name = 'bigscience/bloom-1b7'

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109_6595_5269_2574

    input_text = 'Hello my name is'
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I')
    EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n')
    EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University')
    MAX_NEW_TOKENS = 10
    def setUp(self):
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
"""simple docstring"""
    def setUp(self):
        super().setUp()
        # Models and tokenizer
        self.model_fpaa = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map='auto')
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map='auto')
    def tearDown(self):
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : str):
'''simple docstring'''
snake_case__ = self.model_abit.config
self.assertTrue(hasattr(UpperCamelCase__ , """quantization_config"""))
snake_case__ = config.to_dict()
snake_case__ = config.to_diff_dict()
snake_case__ = config.to_json_string()
def __magic_name__ ( self : Dict):
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
snake_case__ = self.model_fpaa.get_memory_footprint()
snake_case__ = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE)
snake_case__ = get_some_linear_layer(self.model_abit)
self.assertTrue(linear.weight.__class__ == Paramsabit)
def __magic_name__ ( self : Optional[int]):
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(UpperCamelCase__ , torch.nn.Linear):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta)
def __magic_name__ ( self : Dict):
'''simple docstring'''
snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""")
snake_case__ = self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0)
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase__) , self.EXPECTED_OUTPUTS)
def __magic_name__ ( self : str):
'''simple docstring'''
snake_case__ = BitsAndBytesConfig()
snake_case__ = True
snake_case__ = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=UpperCamelCase__ , device_map="""auto""")
snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""")
snake_case__ = model_abit_from_config.generate(
input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0)
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase__) , self.EXPECTED_OUTPUTS)
def __magic_name__ ( self : Optional[int]):
'''simple docstring'''
with self.assertRaises(UpperCamelCase__), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(UpperCamelCase__)
def __magic_name__ ( self : List[str]):
'''simple docstring'''
snake_case__ = BitsAndBytesConfig()
with self.assertRaises(UpperCamelCase__):
snake_case__ = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=UpperCamelCase__ , load_in_abit=UpperCamelCase__ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )
def __magic_name__ ( self : List[Any]):
'''simple docstring'''
with self.assertRaises(UpperCamelCase__):
# Tries with `str`
self.model_abit.to("""cpu""")
with self.assertRaises(UpperCamelCase__):
# Tries with a `dtype``
self.model_abit.to(torch.floataa)
with self.assertRaises(UpperCamelCase__):
# Tries with a `device`
self.model_abit.to(torch.device("""cuda:0"""))
with self.assertRaises(UpperCamelCase__):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(UpperCamelCase__):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""")
snake_case__ = self.model_fpaa.to(torch.floataa)
snake_case__ = self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0)
# Check this does not throw an error
snake_case__ = self.model_fpaa.to("""cpu""")
# Check this does not throw an error
snake_case__ = self.model_fpaa.half()
# Check this does not throw an error
snake_case__ = self.model_fpaa.float()
def __magic_name__ ( self : Dict):
'''simple docstring'''
snake_case__ = AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=UpperCamelCase__ , device_map="""auto""")
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
"""simple docstring"""
    @classmethod
    def setUpClass(cls):
        cls.model_name = 't5-small'
        cls.dense_act_model_name = 'google/flan-t5-small'  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = 'Translate in German: Hello, my dog is cute'
    def tearDown(self):
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : Any):
'''simple docstring'''
from transformers import TaForConditionalGeneration
snake_case__ = TaForConditionalGeneration._keep_in_fpaa_modules
snake_case__ = None
# test with `t5-small`
snake_case__ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0)
snake_case__ = model.generate(**UpperCamelCase__)
# test with `flan-t5-small`
snake_case__ = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0)
snake_case__ = model.generate(**UpperCamelCase__)
snake_case__ = modules
def __magic_name__ ( self : Union[str, Any]):
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
snake_case__ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit))
snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0)
snake_case__ = model.generate(**UpperCamelCase__)
# test with `flan-t5-small`
snake_case__ = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0)
snake_case__ = model.generate(**UpperCamelCase__)
class Bnb4BitModelClassesTest(Base4bitTest):
"""simple docstring"""
def __magic_name__ ( self : int):
'''simple docstring'''
super().setUp()
# model_name
snake_case__ = """bigscience/bloom-560m"""
snake_case__ = """t5-small"""
# Different types of model
snake_case__ = AutoModel.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
# Sequence classification model
snake_case__ = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
# CausalLM model
snake_case__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
# Seq2seq model
snake_case__ = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=UpperCamelCase__ , device_map="""auto""")
def __magic_name__ ( self : List[str]):
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : Union[str, Any]):
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit)
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter)
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
"""simple docstring"""
def __magic_name__ ( self : Tuple):
'''simple docstring'''
super().setUp()
def __magic_name__ ( self : int):
'''simple docstring'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : Tuple):
'''simple docstring'''
snake_case__ = pipeline(
"""text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
snake_case__ = self.pipe(self.input_text)
self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4bitTestMultiGpu(Base4bitTest):
"""simple docstring"""
def __magic_name__ ( self : Union[str, Any]):
'''simple docstring'''
super().setUp()
def __magic_name__ ( self : int):
'''simple docstring'''
snake_case__ = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=UpperCamelCase__ , device_map="""balanced""")
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values()) , {0, 1})
# Check that inference pass works on the model
snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""")
# Second real batch
snake_case__ = model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0)
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=UpperCamelCase__) , self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
"""simple docstring"""
def __magic_name__ ( self : Any):
'''simple docstring'''
snake_case__ = """facebook/opt-350m"""
super().setUp()
def __magic_name__ ( self : Any):
'''simple docstring'''
if version.parse(importlib.metadata.version("""bitsandbytes""")) < version.parse("""0.37.0"""):
return
# Step 1: freeze all parameters
snake_case__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__)
self.assertEqual(set(model.hf_device_map.values()) , {torch.cuda.current_device()})
for param in model.parameters():
snake_case__ = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
snake_case__ = param.data.to(torch.floataa)
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(UpperCamelCase__)):
snake_case__ = LoRALayer(module.q_proj , rank=1_6)
snake_case__ = LoRALayer(module.k_proj , rank=1_6)
snake_case__ = LoRALayer(module.v_proj , rank=1_6)
# Step 3: dummy batch
snake_case__ = self.tokenizer("""Test batch """ , return_tensors="""pt""").to(0)
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
snake_case__ = model.forward(**UpperCamelCase__)
out.logits.norm().backward()
for module in model.modules():
if isinstance(UpperCamelCase__ , UpperCamelCase__):
self.assertTrue(module.adapter[1].weight.grad is not None)
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
elif isinstance(UpperCamelCase__ , nn.Embedding):
self.assertTrue(module.weight.grad is None)
class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = 'gpt2-xl'
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191_8548_5415_2187
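# Hedged note: EXPECTED_RELATIVE_DIFFERENCE is the ratio of the fp16 memory footprint to
# the 4-bit footprint. A pure 16-bit to 4-bit conversion would give 4x; the observed
# ~3.32x for gpt2-xl (and ~2.11x for bloom-1b7 above) reflects that some modules, such
# as embeddings and the lm_head, are kept un-quantized.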
| 654 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        self.interp_method = 'bilinear'
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # 3, 0, 1) # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False).squeeze(0)
            img_augs.append(img)

        return img_augs
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(size) for size in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im, [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]], value=self.pad_value, )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i, torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(), )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes
def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
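# Hedged usage sketch: resize a random uint8 image so its short edge is drawn from
# [800, 800] while capping the long edge at 1333 (Detectron2-style defaults; all values
# illustrative).
#
#   aug = ResizeShortestEdge([800, 800], max_size=1333)
#   img = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
#   resized = aug([img])[0]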
| 654 | 1 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
__lowerCAmelCase :List[Any] = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ['audio_values', 'audio_mask']
    def __init__(self, spectrogram_length=2048, num_channels=1, patch_size=[16, 16], feature_size=128, sampling_rate=44100, hop_length_to_sampling_rate=86, n_fft=2048, padding_value=0.0, **kwargs):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs, )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=2_20_50.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney", ).T
def lowercase ( self , __snake_case ) -> np.ndarray:
'''simple docstring'''
_snake_case : Optional[Any] = spectrogram(
__snake_case , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=80.0 , )
_snake_case : Optional[int] = log_spec[:, :-1]
_snake_case : int = log_spec - 20.0
_snake_case : str = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self , __snake_case , __snake_case = None , __snake_case = True , __snake_case = None , __snake_case = False , __snake_case = False , **__snake_case , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"This feature extractor is set to support sampling rate"
f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
f""" with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_snake_case : Optional[Any] = isinstance(__snake_case , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
_snake_case : str = is_batched_numpy or (
isinstance(__snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_snake_case : List[Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__snake_case , np.ndarray ):
_snake_case : Union[str, Any] = np.asarray(__snake_case , dtype=np.floataa )
elif isinstance(__snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_snake_case : List[str] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_snake_case : str = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
_snake_case : List[Any] = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , __snake_case ):
_snake_case : Optional[Any] = [np.asarray(__snake_case , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
_snake_case : Tuple = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
_snake_case : Tuple = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
_snake_case : int = np.array(__snake_case ).astype(np.floataa )
# convert into correct format for padding
_snake_case : Optional[Any] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
_snake_case : int = np.ones([len(__snake_case ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
_snake_case : Union[str, Any] = padded_audio_features * self.padding_value
for i in range(len(__snake_case ) ):
_snake_case : int = audio_features[i]
_snake_case : List[str] = feature
# return as BatchFeature
if return_attention_mask:
_snake_case : Union[str, Any] = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
else:
_snake_case : Dict = {"audio_values": padded_audio_features}
_snake_case : str = BatchFeature(data=__snake_case , tensor_type=__snake_case )
return encoded_inputs | 278 |
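A standalone check of the dB normalization used in _np_extract_fbank_features above: a log-mel spectrogram clamped to an 80 dB range is shifted and rescaled into [-1.0, 1.0]. The input values here are synthetic; only the arithmetic mirrors the extractor.

import numpy as np

log_spec = np.random.uniform(-80.0, 0.0, size=(128, 100))  # fake dB-scaled mel bins
log_spec = log_spec - 20.0                                  # shift the reference level
log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0        # squash into [-1, 1]
print(log_spec.min() >= -1.0, log_spec.max() <= 1.0)        # True True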
import datasets

from .evaluate import evaluate


_CITATION = """\
@article{hendrycks2021cuad,
    title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
    author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
    journal={arXiv preprint arXiv:2103.06268},
    year={2021}
}
"""

_DESCRIPTION = """
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""

_KWARGS_DESCRIPTION = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
    predictions: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair as given in the references (see below)
        - 'prediction_text': list of possible texts for the answer, as a list of strings
          depending on a threshold on the confidence probability of each prediction.
    references: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair (see above),
        - 'answers': a Dict in the CUAD dataset format
            {
                'text': list of possible texts for the answer, as a list of strings
                'answer_start': list of start positions for the answer, as a list of ints
            }
        Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly match the gold answer)
    'f1': The F-score of predicted tokens versus the gold answer
    'aupr': Area Under the Precision-Recall curve
    'prec_at_80_recall': Precision at 80% recall
    'prec_at_90_recall': Precision at 90% recall
Examples:
    >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> cuad_metric = datasets.load_metric("cuad")
    >>> results = cuad_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score | 278 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 40 |
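A standalone sketch of the seeding pattern used in get_dummy_inputs above: CUDA generators must be created on the target device, while MPS only supports the global torch.manual_seed. The helper name make_generator is ours, purely illustrative.

import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    # MPS does not support device-local generators; fall back to the global seed there.
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

g = make_generator("cpu", seed=0)
print(torch.randn(2, generator=g))  # reproducible across runs for the same seed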
"""simple docstring"""
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(UpperCAmelCase_ ) , """Tatoeba directory does not exist.""" )
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowercase_ ( self ):
'''simple docstring'''
A__ = tempfile.mkdtemp()
return TatoebaConverter(save_dir=UpperCamelCase__ )
@slow
def lowercase_ ( self ):
'''simple docstring'''
self.resolver.convert_models(["heb-eng"] )
@slow
def lowercase_ ( self ):
'''simple docstring'''
A__ , A__ = self.resolver.write_model_card("opus-mt-he-en" , dry_run=UpperCamelCase__ )
assert mmeta["long_pair"] == "heb-eng" | 337 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_a : List[str] = logging.get_logger(__name__)
_a : int = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
_a : List[str] = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def a__ ( a : Dict , a : int , a : List[Any] , a : Tuple , a : str ):
"""simple docstring"""
for attribute in key.split("." ):
_snake_case : int = getattr(_lowercase , _lowercase )
if weight_type is not None:
_snake_case : Dict = getattr(_lowercase , _lowercase ).shape
else:
_snake_case : int = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
_snake_case : str = value
elif weight_type == "weight_g":
_snake_case : Dict = value
elif weight_type == "weight_v":
_snake_case : int = value
elif weight_type == "bias":
_snake_case : List[Any] = value
else:
_snake_case : int = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def a__ ( a : int , a : Dict ):
"""simple docstring"""
_snake_case : Optional[int] = []
_snake_case : Optional[int] = fairseq_model.state_dict()
_snake_case : Any = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
_snake_case : Any = None
for name, value in fairseq_dict.items():
_snake_case : str = False
if "conv_layers" in name:
load_conv_layer(
_lowercase , _lowercase , _lowercase , _lowercase , hf_model.config.feat_extract_norm == "group" , )
_snake_case : Any = True
elif name.split("." )[0] == "proj":
_snake_case : Optional[Any] = fairseq_model.proj
_snake_case : List[str] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
_snake_case : Union[str, Any] = True
if "*" in mapped_key:
_snake_case : Any = name.split(_lowercase )[0].split("." )[-2]
_snake_case : str = mapped_key.replace("*" , _lowercase )
if "weight_g" in name:
_snake_case : List[Any] = "weight_g"
elif "weight_v" in name:
_snake_case : Optional[int] = "weight_v"
elif "bias" in name:
_snake_case : int = "bias"
elif "weight" in name:
_snake_case : Tuple = "weight"
else:
_snake_case : Any = None
set_recursively(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
continue
if not is_used:
unused_weights.append(_lowercase )
logger.warning(f'Unused weights: {unused_weights}' )
return proj_weight
def a__ ( a : Dict , a : int , a : List[Any] , a : int , a : Optional[int] ):
"""simple docstring"""
_snake_case : int = full_name.split("conv_layers." )[-1]
_snake_case : Optional[int] = name.split("." )
_snake_case : int = int(items[0] )
_snake_case : Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
_snake_case : Optional[Any] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
_snake_case : Optional[int] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
_snake_case : Any = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
_snake_case : Tuple = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(_lowercase )
def a__ ( a : Optional[Any] ):
"""simple docstring"""
_snake_case , _snake_case : Dict = emb.weight.shape
_snake_case : Any = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
_snake_case : Tuple = emb.weight.data
return lin_layer
def a__ ( a : List[str] ):
"""simple docstring"""
with open(_lowercase , "r" , encoding="utf-8" ) as f:
_snake_case : Dict = f.readlines()
_snake_case : Any = [line.split(" " )[0] for line in lines]
_snake_case : List[str] = len(_lowercase )
_snake_case : Optional[Any] = {
"<s>": 0,
"<pad>": 1,
"</s>": 2,
"<unk>": 3,
}
vocab_dict.update(dict(zip(_lowercase , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def a__ ( a : Dict , a : List[Any] , a : List[Any] , a : Union[str, Any] , a : Any , a : List[Any] , a : Optional[Any] , ):
"""simple docstring"""
_snake_case : Union[str, Any] = WavaVecaConfig.from_pretrained(_lowercase )
_snake_case : Tuple = SpeechaTextaConfig.from_pretrained(
_lowercase , vocab_size=_lowercase , decoder_layers=_lowercase , do_stable_layer_norm=_lowercase )
_snake_case : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_lowercase , return_attention_mask=_lowercase , )
_snake_case , _snake_case , _snake_case : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
_snake_case : List[str] = model[0].eval()
# set weights for wav2vec2 encoder
_snake_case : List[str] = WavaVecaModel(_lowercase )
_snake_case : int = recursively_load_weights_wavaveca(model.encoder , _lowercase )
_snake_case : Optional[int] = SpeechaTextaForCausalLM(_lowercase )
_snake_case , _snake_case : Tuple = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_lowercase )
# set output linear layer
unexpected_keys.remove("embed_out" )
_snake_case : int = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f'The following keys are missing when loading the decoder weights: {missing_keys}' )
logger.warning(f'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' )
_snake_case : List[str] = SpeechEncoderDecoderModel(encoder=_lowercase , decoder=_lowercase )
_snake_case : Optional[int] = False
# add projection layer
_snake_case : List[Any] = nn.Parameter(projection_layer.weight )
_snake_case : Union[str, Any] = nn.Parameter(projection_layer.bias )
_snake_case : List[Any] = create_vocab_dict(_lowercase )
with open(os.path.join(_lowercase , "vocab.json" ) , "w" ) as fp:
json.dump(_lowercase , _lowercase )
_snake_case : Dict = SpeechaTextaTokenizer(os.path.join(_lowercase , "vocab.json" ) )
tokenizer.save_pretrained(_lowercase )
_snake_case : List[Any] = hf_wavavec.config.to_dict()
_snake_case : Tuple = tokenizer.pad_token_id
_snake_case : List[Any] = tokenizer.bos_token_id
_snake_case : Dict = tokenizer.eos_token_id
_snake_case : Optional[int] = "speech_to_text_2"
_snake_case : Union[str, Any] = "wav2vec2"
_snake_case : Tuple = SpeechEncoderDecoderConfig.from_dict(_lowercase )
hf_wavavec.save_pretrained(_lowercase )
feature_extractor.save_pretrained(_lowercase )
if __name__ == "__main__":
_a : int = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-large-lv60""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/s2t-small-mustc-en-fr-st""",
type=str,
help="""Path to hf decoder s2t checkpoint config""",
)
parser.add_argument("""--vocab_size""", default=10_224, type=int, help="""Vocab size of decoder""")
parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""")
_a : int = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 718 |
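A standalone sketch of the set_recursively pattern above: walk a dotted key down a module tree, then overwrite the tensor in place. The toy model and helper name set_by_dotted_key are ours; only the traversal mirrors the conversion script.

import torch
from torch import nn

def set_by_dotted_key(root: nn.Module, key: str, value: torch.Tensor, weight_type: str = "weight"):
    # Resolve "a.b.c" to root.a.b.c, then assign the tensor to its weight/bias slot.
    pointer = root
    for attribute in key.split("."):
        pointer = getattr(pointer, attribute)
    target = getattr(pointer, weight_type)
    assert target.shape == value.shape, f"shape mismatch for {key}: {target.shape} vs {value.shape}"
    target.data = value

model = nn.Sequential(nn.Linear(4, 4))
set_by_dotted_key(model, "0", torch.zeros(4, 4))
print(model[0].weight.sum().item())  # 0.0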
"""simple docstring"""
from __future__ import annotations
class _UpperCAmelCase :
def __init__( self , snake_case_ , snake_case_ ):
_snake_case , _snake_case : Dict = text, pattern
_snake_case , _snake_case : int = len(snake_case_ ), len(snake_case_ )
def lowerCamelCase__ ( self , snake_case_ ):
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def lowerCamelCase__ ( self , snake_case_ ):
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def lowerCamelCase__ ( self ):
# searches pattern in text and returns index positions
_snake_case : List[str] = []
for i in range(self.textLen - self.patLen + 1 ):
_snake_case : Union[str, Any] = self.mismatch_in_text(snake_case_ )
if mismatch_index == -1:
positions.append(snake_case_ )
else:
_snake_case : Tuple = self.match_in_pattern(self.text[mismatch_index] )
_snake_case : Tuple = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
_a : List[Any] = """ABAABA"""
_a : str = """AB"""
_a : List[Any] = BoyerMooreSearch(text, pattern)
_a : Any = bms.bad_character_heuristic()
if len(positions) == 0:
print("""No match found""")
else:
print("""Pattern found in following positions: """)
print(positions)
| 87 | 0 |
from math import asin, atan, cos, radians, sin, sqrt, tan

AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Return the great-circle distance between two points on Earth, in metres."""
    # CONSTANTS per WGS84 https://en.wikipedia.org/wiki/World_Geodetic_System
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 80 |
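A quick sanity check for the haversine helper above, using approximate San Francisco and New York coordinates; the expected result is on the order of 4.1 million metres.

print(haversine_distance(37.774856, -122.424227, 40.713019, -74.012647))  # ~4.1e6 metres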
import argparse
import pathlib

import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version

from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging


if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
    raise Exception("requires fairseq >= 1.0.0a")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"


def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
| 406 | 0 |
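A distilled, standalone version of the parity check that closes the conversion above: run both models on the same input and require the outputs to agree within a tolerance. The tensors here are synthetic stand-ins for the two models' outputs.

import torch

a = torch.randn(1, 12, 768)
b = a + 1e-6 * torch.randn_like(a)  # stand-in for the "other" model's output
max_absolute_diff = torch.max(torch.abs(a - b)).item()
print(f"max_absolute_diff = {max_absolute_diff}")
assert torch.allclose(a, b, atol=1e-3)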
from typing import Dict, Iterable, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends


if is_vision_available():
    import PIL

# soft dependency
if is_pytesseract_available():
    import pytesseract

logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes


class LayoutLMv3ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 707 |
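A standalone check of the 0-1000 box normalization used above, re-implemented inline so it runs without the processor's relative imports; the helper name _norm_box is ours.

def _norm_box(box, width, height):
    # Map a pixel-space (left, top, right, bottom) box onto the 0-1000 grid
    # that LayoutLM-style models expect.
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]

print(_norm_box([50, 100, 150, 200], 500, 1000))  # [100, 100, 300, 200]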
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name):
    iam_client = boto3.client("iam")

    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "sagemaker:*",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                        "ecr:GetAuthorizationToken",
                        "cloudwatch:PutMetricData",
                        "cloudwatch:GetMetricData",
                        "cloudwatch:GetMetricStatistics",
                        "cloudwatch:ListMetrics",
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:DescribeLogStreams",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "s3:CreateBucket",
                        "s3:ListBucket",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:PutObject",
                    ],
                    "Resource": "*",
                }
            ],
        }
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name,
            PolicyName=f"{role_name}_policy_permission",
            PolicyDocument=json.dumps(policy_document, indent=2),
        )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")


def _get_iam_role_arn(role_name):
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]


def get_sagemaker_input():
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id
        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )
        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )

    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ",
            int,
            default=1,
        )

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )
    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        ec2_instance_type=ec2_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
| 130 | 0 |
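A minimal sketch of the yes/no prompt conversion the questionnaire above relies on. This is a hypothetical re-implementation for illustration; the real helper (_convert_yes_no_to_bool) lives in the config_utils module imported above.

def convert_yes_no_to_bool(value: str) -> bool:
    # Reject anything other than yes/no, mirroring the error message used above.
    if value.lower() not in ("yes", "no"):
        raise ValueError("Please enter yes or no.")
    return value.lower() == "yes"

print(convert_yes_no_to_bool("Yes"))  # True
print(convert_yes_no_to_bool("no"))   # False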
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
lowerCAmelCase__ : List[str] =threading.Lock()
lowerCAmelCase__ : Optional[logging.Handler] =None
lowerCAmelCase__ : List[Any] ={
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
lowerCAmelCase__ : Tuple =logging.WARNING
lowerCAmelCase__ : Dict =True
def a__ ( ):
SCREAMING_SNAKE_CASE_ : Optional[int] = os.getenv('TRANSFORMERS_VERBOSITY', A__ )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F'''Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '''
F'''has to be one of: { ', '.join(log_levels.keys() ) }''' )
return _default_log_level
def a__ ( ):
return __name__.split('.' )[0]
def a__ ( ):
return logging.getLogger(_get_library_name() )
def a__ ( ):
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
SCREAMING_SNAKE_CASE_ : Optional[int] = logging.StreamHandler() # Set sys.stderr as stream.
SCREAMING_SNAKE_CASE_ : Any = sys.stderr.flush
# Apply our default configuration to the library root logger.
SCREAMING_SNAKE_CASE_ : Union[str, Any] = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
SCREAMING_SNAKE_CASE_ : int = False
def a__ ( ):
global _default_handler
with _lock:
if not _default_handler:
return
SCREAMING_SNAKE_CASE_ : List[str] = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
SCREAMING_SNAKE_CASE_ : int = None
def a__ ( ):
return log_levels
def a__ ( A__ = None ):
if name is None:
SCREAMING_SNAKE_CASE_ : List[str] = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(A__ )
def a__ ( ):
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def a__ ( A__ ):
_configure_library_root_logger()
_get_library_root_logger().setLevel(A__ )
def a__ ( ):
return set_verbosity(A__ )
def a__ ( ):
return set_verbosity(A__ )
def a__ ( ):
return set_verbosity(A__ )
def a__ ( ):
return set_verbosity(A__ )
def a__ ( ):
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def a__ ( ):
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def a__ ( A__ ):
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(A__ )
def a__ ( A__ ):
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(A__ )
def a__ ( ):
_configure_library_root_logger()
SCREAMING_SNAKE_CASE_ : str = False
def a__ ( ):
_configure_library_root_logger()
SCREAMING_SNAKE_CASE_ : str = True
def a__ ( ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = _get_library_root_logger().handlers
for handler in handlers:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s' )
handler.setFormatter(A__ )
def a__ ( ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(A__ )
def a__ ( self, *A__, **A__ ):
SCREAMING_SNAKE_CASE_ : Dict = os.getenv('TRANSFORMERS_NO_ADVISORY_WARNINGS', A__ )
if no_advisory_warnings:
return
self.warning(*A__, **A__ )
lowerCAmelCase__ : Optional[Any] =warning_advice
@functools.lru_cache(A__ )
def a__ ( self, *A__, **A__ ):
self.warning(*A__, **A__ )
lowerCAmelCase__ : List[str] =warning_once
class __lowercase :
"""simple docstring"""
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ): # pylint: disable=unused-argument
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = args[0] if args else None
def __iter__( self ):
"""simple docstring"""
return iter(self._iterator )
def __getattr__( self , lowerCAmelCase__ ):
"""simple docstring"""
def empty_fn(*lowerCAmelCase__ , **lowerCAmelCase__ ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self ):
"""simple docstring"""
return self
def __exit__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
return
class __lowercase :
"""simple docstring"""
def __call__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ):
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm(*lowerCAmelCase__ , **lowerCAmelCase__ )
else:
return EmptyTqdm(*lowerCAmelCase__ , **lowerCAmelCase__ )
def UpperCamelCase__ ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*lowerCAmelCase__ , **lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
lowerCAmelCase__ : str =_tqdm_cls()
def a__ ( ):
global _tqdm_active
return bool(_tqdm_active )
def a__ ( ):
global _tqdm_active
    _tqdm_active = True
hf_hub_utils.enable_progress_bars()
def a__ ( ):
global _tqdm_active
    _tqdm_active = False
hf_hub_utils.disable_progress_bars()
| 101 |
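The snippet above is the tail of transformers' per-library logging helpers, with function names anonymized. For reference, a minimal self-contained sketch of the same pattern (all names here are illustrative, not the real transformers API):

```python
import logging
import sys
import threading

_default_handler = None  # shared handler, created once
_lock = threading.Lock()

def _get_library_root_logger() -> logging.Logger:
    # every logger named "mylib.*" propagates up to this one
    return logging.getLogger("mylib")

def _configure_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if _default_handler is not None:  # already configured
            return
        _default_handler = logging.StreamHandler(sys.stderr)
        root = _get_library_root_logger()
        root.addHandler(_default_handler)
        root.setLevel(logging.WARNING)
        root.propagate = False  # keep library logs out of the application's root logger

def set_verbosity(level: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(level)

def set_verbosity_info() -> None:
    set_verbosity(logging.INFO)

if __name__ == "__main__":
    set_verbosity_info()
    _get_library_root_logger().info("library logging configured")
```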
from __future__ import annotations
def slowsort( sequence , start = None , end = None ):
    if start is None:
        start = 0
    if end is None:
        end = len(sequence ) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence , start , mid )
    slowsort(sequence , mid + 1 , end )
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence , start , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 101 | 1 |
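Slowsort is a deliberately pessimal "multiply and surrender" sort: it recursively sorts both halves, bubbles the maximum to the end, then re-sorts everything but the last element. A quick in-place demonstration, assuming the fixed `slowsort` above is in scope:

```python
# assumes the fixed `slowsort` defined above is in scope
data = [5, 1, 4, 2, 8, 0]
slowsort(data)  # sorts in place
assert data == [0, 1, 2, 4, 5, 8]
```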
"""simple docstring"""
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
    def __init__( self : List[str] , config : Tuple , question_encoder_tokenizer : Optional[int] , generator_tokenizer : List[Any] , index : Tuple=None ):
        super().__init__(
            config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
        self.process_group = None
    def init_retrieval( self : List[Any] , distributed_port : int ):
logger.info("initializing retrieval" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("dist initialized" )
# needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1 )
            self.process_group = dist.new_group(ranks=None , backend="gloo" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("dist not initialized / main" )
self.index.init_index()
# all processes wait untill the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
    def _is_main( self : Union[str, Any] ):
return dist.get_rank(group=self.process_group ) == 0
    def _scattered( self : Union[str, Any] , scatter_as_list : Tuple , target_shape : Dict , target_type : Any=torch.float32 ):
        target_tensor = torch.empty(target_shape , dtype=target_type )
        dist.scatter(target_tensor , src=0 , scatter_list=scatter_as_list , group=self.process_group )
        return target_tensor
    def _infer_socket_ifname( self : int ):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e" )) , None )
        return ifname
    def retrieve( self : Union[str, Any] , question_hidden_states : np.ndarray , n_docs : int ):
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states , n_docs )
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids )
        # distributed training
        world_size = dist.get_world_size(group=self.process_group )
        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape , dtype=torch.float32 ) for _ in range(world_size )]
        dist.gather(torch.tensor(question_hidden_states ) , dst=0 , gather_list=gather_list , group=self.process_group )
        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list ) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list ).numpy() , n_docs )
            ids, vectors = torch.tensor(ids ), torch.tensor(vectors )
            scatter_ids = self._chunk_tensor(ids , world_size )
            scatter_vectors = self._chunk_tensor(vectors , world_size )
        doc_ids = self._scattered(scatter_ids , [n_queries, n_docs] , target_type=torch.int64 )
        retrieved_doc_embeds = self._scattered(scatter_vectors , [n_queries, n_docs, question_hidden_states.shape[1]] )
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids )
| 281 |
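The retriever above funnels every worker's queries to rank 0, retrieves there, and scatters the answers back. A toy, self-contained round trip of the same gather-then-scatter pattern on two CPU workers with the gloo backend (the address, port, and the ×10 "retrieval" are illustrative):

```python
import os

import torch
import torch.distributed as dist
import torch.multiprocessing as mp

def worker(rank: int, world_size: int) -> None:
    os.environ["MASTER_ADDR"] = "127.0.0.1"
    os.environ["MASTER_PORT"] = "29501"  # arbitrary free port
    dist.init_process_group("gloo", rank=rank, world_size=world_size)

    query = torch.full((2,), float(rank))  # each worker's own "queries"
    gather_list = [torch.empty(2) for _ in range(world_size)] if rank == 0 else None
    dist.gather(query, gather_list=gather_list, dst=0)  # rank 0 collects everything

    # "retrieve" only on the main worker, then send each rank its own slice back
    answers = [t * 10 for t in gather_list] if rank == 0 else None
    result = torch.empty(2)
    dist.scatter(result, scatter_list=answers, src=0)
    print(f"rank {rank} got {result.tolist()}")
    dist.destroy_process_group()

if __name__ == "__main__":
    mp.spawn(worker, args=(2,), nprocs=2)
```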
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
__A : Optional[int] = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
__A : List[Any] = '\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {\'spearmanr\': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results[\'spearmanr\'])\n -0.7\n >>> print(round(results[\'spearmanr_pvalue\'], 2))\n 0.19\n'
__A : Optional[int] = R'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
"""simple docstring"""
    def _info( self : List[str] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , )
    def _compute( self : Optional[int] , predictions , references , return_pvalue=False ):
        results = spearmanr(predictions , references )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 281 | 1 |
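The metric is a thin wrapper around `scipy.stats.spearmanr`; calling scipy directly reproduces the docstring example:

```python
from scipy.stats import spearmanr

rho, pvalue = spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
print(round(rho, 2))     # -0.7, matching the docstring example
print(round(pvalue, 2))  # ~0.19
```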
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version('''nltk'''))
if NLTK_VERSION >= version.Version('''3.6.4'''):
from nltk import word_tokenize
_CITATION = '''\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
'''
_DESCRIPTION = '''\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
'''
_KWARGS_DESCRIPTION = '''
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
\'meteor\': meteor score.
Examples:
>>> meteor = datasets.load_metric(\'meteor\')
>>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
>>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results["meteor"], 4))
0.6944
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase (datasets.Metric ):
    def _info( self: List[str] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
'predictions': datasets.Value('string',id='sequence' ),
'references': datasets.Value('string',id='sequence' ),
} ),codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'],reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
],)
    def _download_and_prepare( self: int,dl_manager: str ):
'''simple docstring'''
import nltk
nltk.download('wordnet' )
if NLTK_VERSION >= version.Version('3.6.5' ):
nltk.download('punkt' )
if NLTK_VERSION >= version.Version('3.6.6' ):
nltk.download('omw-1.4' )
    def _compute( self: str,predictions: Any,references: Optional[Any],alpha: Any=0.9,beta: int=3,gamma: Optional[Any]=0.5 ):
        '''simple docstring'''
        if NLTK_VERSION >= version.Version('3.6.5' ):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref ),word_tokenize(pred ),alpha=alpha,beta=beta,gamma=gamma )
                for ref, pred in zip(references,predictions )
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref,pred,alpha=alpha,beta=beta,gamma=gamma )
                for ref, pred in zip(references,predictions )
            ]
        return {"meteor": np.mean(scores )}
| 1 |
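The metric ultimately calls nltk's `single_meteor_score` per (reference, prediction) pair. Direct use with a recent nltk reproduces the docstring example (the downloads mirror `_download_and_prepare` above):

```python
import nltk
from nltk import word_tokenize
from nltk.translate import meteor_score

nltk.download("wordnet", quiet=True)
nltk.download("punkt", quiet=True)
nltk.download("omw-1.4", quiet=True)

pred = "It is a guide to action which ensures that the military always obeys the commands of the party"
ref = "It is a guide to action that ensures that the military will forever heed Party commands"
score = meteor_score.single_meteor_score(
    word_tokenize(ref), word_tokenize(pred), alpha=0.9, beta=3, gamma=0.5
)
print(round(score, 4))  # ~0.6944, per the docstring example
```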
'''simple docstring'''
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"kwargs, expected" , [
({"num_shards": 0, "max_num_jobs": 1}, []),
({"num_shards": 10, "max_num_jobs": 1}, [range(10 )]),
({"num_shards": 10, "max_num_jobs": 10}, [range(__lowerCamelCase , i + 1 ) for i in range(10 )]),
({"num_shards": 1, "max_num_jobs": 10}, [range(1 )]),
({"num_shards": 10, "max_num_jobs": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"num_shards": 3, "max_num_jobs": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards (kwargs : List[Any] , expected : int ) -> Optional[int]:
    out = _distribute_shards(**kwargs )
    assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, max_num_jobs, expected" , [
({"foo": 0}, 10, [{"foo": 0}]),
({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
] , )
def test_split_gen_kwargs (gen_kwargs : Any , max_num_jobs : List[str] , expected : int ) -> int:
    out = _split_gen_kwargs(gen_kwargs , max_num_jobs )
    assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, expected" , [
({"foo": 0}, 1),
({"shards": [0]}, 1),
({"shards": [0, 1, 2, 3]}, 4),
({"shards": [0, 1, 2, 3], "foo": 0}, 4),
({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs (gen_kwargs : Any , expected : int ) -> Union[str, Any]:
    if expected is RuntimeError:
        with pytest.raises(expected ):
            _number_of_shards_in_gen_kwargs(gen_kwargs )
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs )
        assert out == expected
| 489 | 0 |
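The tests pin down the contract of `_distribute_shards`: split `num_shards` indices into at most `max_num_jobs` contiguous, near-equal ranges. A plausible re-implementation reverse-engineered from the expected values above; the real helper in `datasets` may differ in details:

```python
from __future__ import annotations

def distribute_shards(num_shards: int, max_num_jobs: int) -> list[range]:
    """Split shard indices 0..num_shards into at most max_num_jobs contiguous ranges."""
    jobs = min(num_shards, max_num_jobs)
    out, start = [], 0
    for i in range(jobs):
        # spread the remainder over the first `num_shards % jobs` jobs
        size = num_shards // jobs + (1 if i < num_shards % jobs else 0)
        out.append(range(start, start + size))
        start += size
    return out

assert distribute_shards(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]
assert distribute_shards(3, 10) == [range(0, 1), range(1, 2), range(2, 3)]
assert distribute_shards(0, 1) == []
```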
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowerCAmelCase_ = False
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
# remove text_unet
pipe.remove_unused_weights()
pipe.to(UpperCamelCase)
pipe.set_progress_bar_config(disable=UpperCamelCase)
lowerCamelCase__ = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
prompt=UpperCamelCase , generator=UpperCamelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy").images
with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
pipe.to(UpperCamelCase)
pipe.set_progress_bar_config(disable=UpperCamelCase)
        generator = generator.manual_seed(0)
        new_image = pipe(
prompt=UpperCamelCase , generator=UpperCamelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy").images
assert np.abs(image - new_image).sum() < 1E-5, "Models don't have the same forward pass"
def __UpperCAmelCase ( self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
"shi-labs/versatile-diffusion" , torch_dtype=torch.floataa)
pipe.to(UpperCamelCase)
pipe.set_progress_bar_config(disable=UpperCamelCase)
lowerCamelCase__ = "A painting of a squirrel eating a burger "
lowerCamelCase__ = torch.manual_seed(0)
        image = pipe(
            prompt=UpperCamelCase , generator=UpperCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy").images
        image_slice = image[0, 2_53:2_56, 2_53:2_56, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 426 |
'''simple docstring'''
def solution( limit : int = 2_8123 ) -> int:
    '''simple docstring'''
    sum_divs = [1] * (limit + 1)
    for i in range(2 , int(limit**0.5 ) + 1 ):
        sum_divs[i * i] += i
        for k in range(i + 1 , limit // i + 1 ):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1 , limit + 1 ):
        if sum_divs[n] > n:
            abundants.add(n )
        if not any((n - a in abundants) for a in abundants ):
            res += n
    return res
if __name__ == "__main__":
print(solution())
| 426 | 1 |
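The sieve above accumulates proper-divisor sums for all numbers up to the limit instead of factorizing each one. A naive cross-check on small inputs:

```python
def proper_divisor_sum(n: int) -> int:
    return sum(d for d in range(1, n) if n % d == 0)

# 12 is the smallest abundant number: 1 + 2 + 3 + 4 + 6 = 16 > 12
assert proper_divisor_sum(12) == 16
assert all(proper_divisor_sum(n) <= n for n in range(1, 12))
```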
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")
class Node ( Generic[T] ):
    '''simple docstring'''
    def __init__( self , data ) -> None:
        self.data = data
        self.next: Node[T] | None = None
    def __str__( self ) -> str:
        return F'''{self.data}'''
class Stack ( Generic[T] ):
    '''simple docstring'''
    def __init__( self ) -> None:
        self.top: Node[T] | None = None
    def __iter__( self ) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next
    def __str__( self ) -> str:
        return "->".join([str(item ) for item in self] )
    def __len__( self ) -> int:
        return len(tuple(iter(self ) ) )
    def is_empty( self ) -> bool:
        return self.top is None
    def push( self , item ) -> None:
        node = Node(item )
        if not self.is_empty():
            node.next = self.top
        self.top = node
    def pop( self ) -> T:
        if self.is_empty():
            raise IndexError('pop from empty stack' )
        assert isinstance(self.top , Node )
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data
    def peek( self ) -> T:
        if self.is_empty():
            raise IndexError('peek from empty stack' )
        assert self.top is not None
        return self.top.data
    def clear( self ) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| 120 |
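Basic usage of the linked-list stack above; `__str__` walks from the top, so the most recent push prints first:

```python
# assumes the Node/Stack classes defined above are in scope
stack = Stack()
for value in (1, 2, 3):
    stack.push(value)
assert str(stack) == "3->2->1"  # most recent push is on top
assert len(stack) == 3
assert stack.peek() == 3
assert stack.pop() == 3  # LIFO order
stack.clear()
assert stack.is_empty()
```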
"""simple docstring"""
from torch import nn
class _UpperCAmelCase ( nn.Module ):
    def __init__( self : Optional[int] , class_size : List[str] , embed_size : Any ) -> Tuple:
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size , class_size )
    def forward( self : Dict , hidden_state : Optional[int] ) -> Tuple:
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state )
        return logits
| 231 | 0 |
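A quick smoke test of the head above (kept under its obfuscated class name); it linearly maps hidden states to class logits:

```python
import torch

# assumes the head class above is in scope
head = _UpperCAmelCase(class_size=5, embed_size=768)
hidden = torch.randn(2, 768)      # a batch of two 768-dim hidden states
logits = head(hidden)             # forward() applies the single linear layer
assert logits.shape == (2, 5)
```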
'''simple docstring'''
def _get_demo_graph( index : int ) -> dict[int, list[int]]:
'''simple docstring'''
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges( graph : dict[int, list[int]] ) -> list[tuple[int, int]]:
    '''simple docstring'''
    id_ = 0
    n = len(graph )  # No of vertices in graph
    low = [0] * n
    visited = [False] * n
    def dfs(at , parent , bridges , id_ ):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to , at , bridges , id_ )
                low[at] = min(low[at] , low[to] )
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at) )
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at] , low[to] )
    bridges: list[tuple[int, int]] = []
    for i in range(n ):
        if not visited[i]:
            dfs(i , -1 , bridges , id_ )
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
| 419 |
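Exercising the two helpers above: in demo graph 0 the triangle {0, 1, 2} and the cycle {5, 6, 7, 8} are 2-edge-connected, so only the edges joining them (plus the pendant edge 3-4) are bridges. With the fixed definitions in scope, this should print the three bridges:

```python
# assumes _get_demo_graph and compute_bridges from above are in scope
bridges = compute_bridges(_get_demo_graph(0))
print(bridges)  # expected: [(3, 4), (2, 3), (2, 5)]
```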
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A_ : List[Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
A_ : str = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', F'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', F'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qpos_proj.weight', F'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kpos_proj.weight', F'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.weight', F'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', F'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', F'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kpos_proj.weight', F'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.weight', F'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', F'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', F'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', F'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_qpos_proj.bias', F'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_kpos_proj.bias', F'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.bias', F'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', F'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', F'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_kpos_proj.bias', F'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.bias', F'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', F'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def rename_key( state_dict : Optional[Any] , old : List[str] , new : Tuple ) -> Dict:
    '''simple docstring'''
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys( state_dict : Any ) -> Union[str, Any]:
    '''simple docstring'''
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v( state_dict : Optional[int] , is_panoptic : Tuple=False ) -> int:
    '''simple docstring'''
    prefix = """"""
    if is_panoptic:
        prefix = """conditional_detr."""
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:2_56, :]
        state_dict[f"{prefix}encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:2_56]
        state_dict[f"{prefix}encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[2_56:5_12, :]
        state_dict[f"{prefix}encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[2_56:5_12]
        state_dict[f"{prefix}encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-2_56:, :]
        state_dict[f"{prefix}encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-2_56:]
def prepare_img( ) -> int:
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint( model_name : Dict , pytorch_dump_folder_path : List[Any] ) -> Any:
'''simple docstring'''
snake_case__ : Union[str, Any] = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
snake_case__ : str = """resnet101"""
if "dc5" in model_name:
snake_case__ : Optional[int] = True
snake_case__ : str = """panoptic""" in model_name
if is_panoptic:
snake_case__ : List[Any] = 2_50
else:
snake_case__ : Optional[Any] = 91
snake_case__ : Optional[Any] = """huggingface/label-files"""
snake_case__ : Optional[int] = """coco-detection-id2label.json"""
snake_case__ : int = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="""dataset""" ) , """r""" ) )
snake_case__ : Any = {int(__magic_name__ ): v for k, v in idalabel.items()}
snake_case__ : Optional[Any] = idalabel
snake_case__ : int = {v: k for k, v in idalabel.items()}
# load image processor
snake_case__ : Optional[Any] = """coco_panoptic""" if is_panoptic else """coco_detection"""
snake_case__ : Union[str, Any] = ConditionalDetrImageProcessor(format=__magic_name__ )
# prepare image
snake_case__ : str = prepare_img()
snake_case__ : Tuple = image_processor(images=__magic_name__ , return_tensors="""pt""" )
snake_case__ : Any = encoding["""pixel_values"""]
logger.info(f"Converting model {model_name}..." )
# load original model from torch hub
snake_case__ : Optional[Any] = torch.hub.load("""DeppMeng/ConditionalDETR""" , __magic_name__ , pretrained=__magic_name__ ).eval()
snake_case__ : List[Any] = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
snake_case__ : Dict = """conditional_detr.""" + src
rename_key(__magic_name__ , __magic_name__ , __magic_name__ )
snake_case__ : Union[str, Any] = rename_backbone_keys(__magic_name__ )
# query, key and value matrices need special treatment
read_in_q_k_v(__magic_name__ , is_panoptic=__magic_name__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
snake_case__ : List[Any] = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
snake_case__ : Dict = state_dict.pop(__magic_name__ )
snake_case__ : str = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
snake_case__ : Optional[int] = state_dict.pop(__magic_name__ )
snake_case__ : Optional[int] = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
snake_case__ : str = state_dict.pop(__magic_name__ )
snake_case__ : Dict = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
snake_case__ : int = state_dict.pop(__magic_name__ )
snake_case__ : str = val
# finally, create HuggingFace model and load state dict
snake_case__ : Tuple = ConditionalDetrForSegmentation(__magic_name__ ) if is_panoptic else ConditionalDetrForObjectDetection(__magic_name__ )
model.load_state_dict(__magic_name__ )
model.eval()
model.push_to_hub(repo_id=__magic_name__ , organization="""DepuMeng""" , commit_message="""Add model""" )
# verify our conversion
snake_case__ : Union[str, Any] = conditional_detr(__magic_name__ )
snake_case__ : Dict = model(__magic_name__ )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
# Save model and image processor
logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
model.save_pretrained(__magic_name__ )
image_processor.save_pretrained(__magic_name__ )
if __name__ == "__main__":
A_ : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
A_ : List[Any] = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 419 | 1 |
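The heart of `read_in_q_k_v` is slicing PyTorch's fused `in_proj` attention matrix into separate q/k/v projections. A standalone check that the split is equivalent to the fused projection (dimensions are illustrative):

```python
import torch

d = 2_56  # hidden size used by the script above
in_proj_weight = torch.randn(3 * d, d)  # fused q|k|v rows, as stored by nn.MultiheadAttention
in_proj_bias = torch.randn(3 * d)

q_w, k_w, v_w = in_proj_weight[:d], in_proj_weight[d : 2 * d], in_proj_weight[-d:]
q_b, k_b, v_b = in_proj_bias[:d], in_proj_bias[d : 2 * d], in_proj_bias[-d:]

x = torch.randn(1, d)
fused = x @ in_proj_weight.T + in_proj_bias
split = torch.cat([x @ q_w.T + q_b, x @ k_w.T + k_b, x @ v_w.T + v_b], dim=-1)
assert torch.allclose(fused, split, atol=1e-6)  # splitting loses nothing
```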
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""],
"""tokenization_ctrl""": ["""CTRLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
"""CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CTRLForSequenceClassification""",
"""CTRLLMHeadModel""",
"""CTRLModel""",
"""CTRLPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
"""TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCTRLForSequenceClassification""",
"""TFCTRLLMHeadModel""",
"""TFCTRLModel""",
"""TFCTRLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
UpperCamelCase__ :Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 355 |
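`_LazyModule` defers the heavy submodule imports until an exported name is first touched. A minimal sketch of the core idea; the real transformers class additionally handles module specs, docs, and error messages:

```python
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported symbol back to the submodule that defines it
        self._symbol_to_module = {
            sym: mod for mod, syms in import_structure.items() for sym in syms
        }
        self.__all__ = list(self._symbol_to_module)

    def __getattr__(self, name: str):
        if name not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        submodule = importlib.import_module("." + self._symbol_to_module[name], self.__name__)
        value = getattr(submodule, name)
        setattr(self, name, value)  # cache so the import runs only once
        return value

# usage sketch: sys.modules[__name__] = LazyModule(__name__, {"tokenization_ctrl": ["CTRLTokenizer"]})
```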
"""simple docstring"""
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two( x ) -> int:
    return x + 2
class A( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase( self ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase :Union[str, Any] = '''x = 3'''
_UpperCamelCase :List[str] = {}
_UpperCamelCase :int = evaluate(SCREAMING_SNAKE_CASE__ , {} , state=SCREAMING_SNAKE_CASE__ )
assert result == 3
self.assertDictEqual(SCREAMING_SNAKE_CASE__ , {'''x''': 3} )
_UpperCamelCase :Tuple = '''x = y'''
_UpperCamelCase :Optional[int] = {'''y''': 5}
_UpperCamelCase :Optional[Any] = evaluate(SCREAMING_SNAKE_CASE__ , {} , state=SCREAMING_SNAKE_CASE__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(SCREAMING_SNAKE_CASE__ , {'''x''': 5, '''y''': 5} )
def _UpperCamelCase( self ) -> int:
"""simple docstring"""
_UpperCamelCase :str = '''y = add_two(x)'''
_UpperCamelCase :Optional[int] = {'''x''': 3}
_UpperCamelCase :Union[str, Any] = evaluate(SCREAMING_SNAKE_CASE__ , {'''add_two''': add_two} , state=SCREAMING_SNAKE_CASE__ )
assert result == 5
self.assertDictEqual(SCREAMING_SNAKE_CASE__ , {'''x''': 3, '''y''': 5} )
# Won't work without the tool
with CaptureStdout() as out:
_UpperCamelCase :Union[str, Any] = evaluate(SCREAMING_SNAKE_CASE__ , {} , state=SCREAMING_SNAKE_CASE__ )
assert result is None
assert "tried to execute add_two" in out.out
def _UpperCamelCase( self ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase :Any = '''x = 3'''
_UpperCamelCase :int = {}
_UpperCamelCase :List[Any] = evaluate(SCREAMING_SNAKE_CASE__ , {} , state=SCREAMING_SNAKE_CASE__ )
assert result == 3
self.assertDictEqual(SCREAMING_SNAKE_CASE__ , {'''x''': 3} )
def _UpperCamelCase( self ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase :str = '''test_dict = {\'x\': x, \'y\': add_two(x)}'''
_UpperCamelCase :str = {'''x''': 3}
_UpperCamelCase :Dict = evaluate(SCREAMING_SNAKE_CASE__ , {'''add_two''': add_two} , state=SCREAMING_SNAKE_CASE__ )
self.assertDictEqual(SCREAMING_SNAKE_CASE__ , {'''x''': 3, '''y''': 5} )
self.assertDictEqual(SCREAMING_SNAKE_CASE__ , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} )
def _UpperCamelCase( self ) -> Tuple:
"""simple docstring"""
_UpperCamelCase :int = '''x = 3\ny = 5'''
_UpperCamelCase :int = {}
_UpperCamelCase :Optional[int] = evaluate(SCREAMING_SNAKE_CASE__ , {} , state=SCREAMING_SNAKE_CASE__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(SCREAMING_SNAKE_CASE__ , {'''x''': 3, '''y''': 5} )
def _UpperCamelCase( self ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase :Union[str, Any] = '''text = f\'This is x: {x}.\''''
_UpperCamelCase :List[str] = {'''x''': 3}
_UpperCamelCase :Any = evaluate(SCREAMING_SNAKE_CASE__ , {} , state=SCREAMING_SNAKE_CASE__ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(SCREAMING_SNAKE_CASE__ , {'''x''': 3, '''text''': '''This is x: 3.'''} )
def _UpperCamelCase( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase :Optional[Any] = '''if x <= 3:\n y = 2\nelse:\n y = 5'''
_UpperCamelCase :Dict = {'''x''': 3}
_UpperCamelCase :Any = evaluate(SCREAMING_SNAKE_CASE__ , {} , state=SCREAMING_SNAKE_CASE__ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(SCREAMING_SNAKE_CASE__ , {'''x''': 3, '''y''': 2} )
_UpperCamelCase :Union[str, Any] = {'''x''': 8}
_UpperCamelCase :Optional[int] = evaluate(SCREAMING_SNAKE_CASE__ , {} , state=SCREAMING_SNAKE_CASE__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(SCREAMING_SNAKE_CASE__ , {'''x''': 8, '''y''': 5} )
def _UpperCamelCase( self ) -> Tuple:
"""simple docstring"""
_UpperCamelCase :List[str] = '''test_list = [x, add_two(x)]'''
_UpperCamelCase :Optional[int] = {'''x''': 3}
_UpperCamelCase :str = evaluate(SCREAMING_SNAKE_CASE__ , {'''add_two''': add_two} , state=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , [3, 5] )
self.assertDictEqual(SCREAMING_SNAKE_CASE__ , {'''x''': 3, '''test_list''': [3, 5]} )
def _UpperCamelCase( self ) -> Dict:
"""simple docstring"""
_UpperCamelCase :List[Any] = '''y = x'''
_UpperCamelCase :List[str] = {'''x''': 3}
_UpperCamelCase :Union[str, Any] = evaluate(SCREAMING_SNAKE_CASE__ , {} , state=SCREAMING_SNAKE_CASE__ )
assert result == 3
self.assertDictEqual(SCREAMING_SNAKE_CASE__ , {'''x''': 3, '''y''': 3} )
def _UpperCamelCase( self ) -> Tuple:
"""simple docstring"""
_UpperCamelCase :Tuple = '''test_list = [x, add_two(x)]\ntest_list[1]'''
_UpperCamelCase :Optional[Any] = {'''x''': 3}
_UpperCamelCase :Any = evaluate(SCREAMING_SNAKE_CASE__ , {'''add_two''': add_two} , state=SCREAMING_SNAKE_CASE__ )
assert result == 5
self.assertDictEqual(SCREAMING_SNAKE_CASE__ , {'''x''': 3, '''test_list''': [3, 5]} )
_UpperCamelCase :str = '''test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'''
_UpperCamelCase :Optional[int] = {'''x''': 3}
_UpperCamelCase :Union[str, Any] = evaluate(SCREAMING_SNAKE_CASE__ , {'''add_two''': add_two} , state=SCREAMING_SNAKE_CASE__ )
assert result == 5
self.assertDictEqual(SCREAMING_SNAKE_CASE__ , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} )
def _UpperCamelCase( self ) -> int:
"""simple docstring"""
_UpperCamelCase :str = '''x = 0\nfor i in range(3):\n x = i'''
_UpperCamelCase :Tuple = {}
_UpperCamelCase :Union[str, Any] = evaluate(SCREAMING_SNAKE_CASE__ , {'''range''': range} , state=SCREAMING_SNAKE_CASE__ )
assert result == 2
self.assertDictEqual(SCREAMING_SNAKE_CASE__ , {'''x''': 2, '''i''': 2} )
| 355 | 1 |
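Distilled from the tests above, the call shape of the restricted interpreter: `evaluate(code, tools, state=...)` mutates `state` and returns the value of the last assignment:

```python
from transformers.tools.python_interpreter import evaluate

def add_two(x):
    return x + 2

state = {"x": 3}
result = evaluate("y = add_two(x)", {"add_two": add_two}, state=state)
assert result == 5                      # value of the last assignment
assert state == {"x": 3, "y": 5}        # state is updated in place
```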
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __A (__magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
snake_case :Optional[int] = StableUnCLIPPipeline
snake_case :Optional[Any] = TEXT_TO_IMAGE_PARAMS
snake_case :List[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
snake_case :Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
snake_case :Any = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
snake_case :Optional[int] = False
    def get_dummy_components( self ):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__UpperCAmelCase : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__UpperCAmelCase : Dict = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCamelCase_ , projection_dim=UpperCamelCase_ , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__UpperCAmelCase : str = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=UpperCamelCase_ , num_layers=1 , )
torch.manual_seed(0 )
__UpperCAmelCase : Optional[Any] = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=10_00 , clip_sample=UpperCamelCase_ , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
__UpperCAmelCase : Optional[Any] = StableUnCLIPImageNormalizer(embedding_dim=UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
__UpperCAmelCase : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__UpperCAmelCase : List[str] = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCamelCase_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__UpperCAmelCase : str = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=UpperCamelCase_ , layers_per_block=1 , upcast_attention=UpperCamelCase_ , use_linear_projection=UpperCamelCase_ , )
torch.manual_seed(0 )
__UpperCAmelCase : int = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type="v_prediction" , set_alpha_to_one=UpperCamelCase_ , steps_offset=1 , )
torch.manual_seed(0 )
__UpperCAmelCase : List[Any] = AutoencoderKL()
        components = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def _snake_case ( self ):
__UpperCAmelCase : Union[str, Any] = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=UpperCamelCase_ )
def _snake_case ( self ):
__UpperCAmelCase : str = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=UpperCamelCase_ )
@slow
@require_torch_gpu
class __A (unittest.TestCase ):
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ):
        expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        output = pipe("anime turle" , generator=generator , output_type="np" )
        image = output.images[0]
assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(image , expected_image )
def _snake_case ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase : Optional[int] = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
        mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 10 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Any = logging.get_logger(__name__)
_a : List[Any] = {
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __A (__magic_name__ ):
snake_case :Any = "cvt"
    def __init__( self , num_channels=3 , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , embed_dim=[64, 1_92, 3_84] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , mlp_ratio=[4.0, 4.0, 4.0] , attention_drop_rate=[0.0, 0.0, 0.0] , drop_rate=[0.0, 0.0, 0.0] , drop_path_rate=[0.0, 0.0, 0.1] , qkv_bias=[True, True, True] , cls_token=[False, False, True] , qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"] , kernel_qkv=[3, 3, 3] , padding_kv=[1, 1, 1] , stride_kv=[2, 2, 2] , padding_q=[1, 1, 1] , stride_q=[1, 1, 1] , initializer_range=0.0_2 , layer_norm_eps=1E-12 , **kwargs , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 10 | 1 |
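Instantiating the config uses per-stage lists (three stages for CvT-13); a custom variant only needs the overridden fields. A short sketch, assuming the released `CvtConfig` defaults match the snippet above:

```python
from transformers import CvtConfig

config = CvtConfig()
assert config.embed_dim == [64, 192, 384]  # one entry per stage
assert config.num_heads == [1, 3, 6]
# a smaller custom variant only needs the overridden fields
tiny = CvtConfig(embed_dim=[32, 96, 192], depth=[1, 1, 2])
```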
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
snake_case = 'ssube/stable-diffusion-x4-upscaler-onnx'
    def get_dummy_inputs( self : str , seed : str=0 ) -> Optional[Any]:
        '''simple docstring'''
        image = floats_tensor((1, 3, 128, 128) , rng=random.Random(seed ) )
        generator = torch.manual_seed(seed )
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def lowerCamelCase__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.697_4782, 0.6890_2093, 0.7013_5885, 0.758_3618, 0.780_4545, 0.785_4912, 0.7866_7426, 0.7874_3863, 0.7807_0223] )
        assert np.abs(image_slice - expected_slice ).max() < 1e-1
def lowerCamelCase__ ( self : int ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowerCamelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
lowerCamelCase = self.get_dummy_inputs()
lowerCamelCase = pipe(**__snake_case ).images
lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase = np.array(
[0.689_8892, 0.5924_0556, 0.5249_9527, 0.5886_6215, 0.5225_8235, 0.5257_2715, 0.6241_4473, 0.617_4387, 0.621_4964] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase__ ( self : Any ) -> Any:
'''simple docstring'''
lowerCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowerCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__snake_case )
lowerCamelCase = self.get_dummy_inputs()
lowerCamelCase = pipe(**__snake_case ).images
lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase = np.array(
[0.765_9278, 0.7643_7664, 0.7557_9107, 0.769_1116, 0.7766_6986, 0.772_7672, 0.775_8664, 0.781_2226, 0.7694_2515] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowerCamelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__snake_case )
lowerCamelCase = self.get_dummy_inputs()
lowerCamelCase = pipe(**__snake_case ).images
lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase = np.array(
[0.697_4782, 0.6890_2093, 0.7013_5885, 0.758_3618, 0.780_4545, 0.785_4912, 0.7866_7426, 0.7874_3863, 0.7807_0223] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowerCamelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__snake_case )
lowerCamelCase = self.get_dummy_inputs()
lowerCamelCase = pipe(**__snake_case ).images
lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase = np.array(
[0.7742_4496, 0.77_3601, 0.764_5288, 0.776_9598, 0.777_2739, 0.773_8688, 0.7818_7233, 0.7787_9584, 0.76_7043] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCamelCase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCamelCase__ ( self : str ) -> str:
'''simple docstring'''
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def lowerCamelCase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg' )
        init_image = init_image.resize((128, 128) )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            'ssube/stable-diffusion-x4-upscaler-onnx' , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'A fantasy landscape, trending on artstation'
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type='np' , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def lowerCamelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg' )
        init_image = init_image.resize((128, 128) )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            'ssube/stable-diffusion-x4-upscaler-onnx' , subfolder='scheduler' )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            'ssube/stable-diffusion-x4-upscaler-onnx' , scheduler=lms_scheduler , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'A fantasy landscape, trending on artstation'
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type='np' , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.5017_3753, 0.5022_3356, 0.50_2039, 0.5023_3036, 0.502_3725, 0.502_2601, 0.501_8758, 0.5023_4085, 0.5024_1566] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
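# (Added) Hedged usage sketch distilled from the tests above. The checkpoint id
# matches the one used in the nightly tests; the prompt, input image, and step
# count are illustrative placeholders:
#
#     import torch
#     from diffusers import OnnxStableDiffusionUpscalePipeline
#
#     pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
#         'ssube/stable-diffusion-x4-upscaler-onnx', provider='CPUExecutionProvider'
#     )
#     generator = torch.manual_seed(0)
#     upscaled = pipe(
#         prompt='A fantasy landscape, trending on artstation',
#         image=low_res_image,  # a 128x128 PIL image, upscaled 4x to 512x512
#         num_inference_steps=10,
#         generator=generator,
#         output_type='np',
#     ).images[0]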
| 246 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule ( FlaxBigBirdForQuestionAnsweringModule ):
'''simple docstring'''
snake_case = 42
    snake_case = jnp.float32
snake_case = True
def lowerCamelCase__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
super().setup()
lowerCamelCase = nn.Dense(5 , dtype=self.dtype )
    def __call__( self : int , *args : str , **kwargs : Union[str, Any] ) -> Tuple:
        '''simple docstring'''
        outputs = super().__call__(*args , **kwargs )
        cls_out = self.cls(outputs[2] )
        return outputs[:2] + (cls_out,)
class lowerCAmelCase ( FlaxBigBirdForQuestionAnswering ):
'''simple docstring'''
snake_case = FlaxBigBirdForNaturalQuestionsModule
def a_ ( start_logits : List[Any] , start_labels : Optional[Any] , end_logits : str , end_labels : Any , pooled_logits : str , pooled_labels : List[str] ) -> Any:
    """simple docstring"""
    def cross_entropy(logits : Optional[int] , labels : Tuple , reduction : Any=None ):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size )[None]).astype('f4' )
        logits = jax.nn.log_softmax(logits , axis=-1 )
        loss = -jnp.sum(labels * logits , axis=-1 )
        if reduction is not None:
            loss = reduction(loss )
        return loss
    cross_entropy = partial(cross_entropy , reduction=jnp.mean )
    start_loss = cross_entropy(start_logits , start_labels )
    end_loss = cross_entropy(end_logits , end_labels )
    pooled_loss = cross_entropy(pooled_logits , pooled_labels )
return (start_loss + end_loss + pooled_loss) / 3
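# (Added) Toy check of the loss above: with uniform logits over C classes, each
# per-example cross-entropy equals log(C), independent of the label.
#
#     logits = jnp.zeros((2, 8))      # batch of 2, 8 classes, uniform
#     labels = jnp.array([3, 5])
#     # cross_entropy(logits, labels) == jnp.log(8) for every example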
@dataclass
class lowerCAmelCase :
'''simple docstring'''
snake_case = "google/bigbird-roberta-base"
snake_case = 3_000
snake_case = 10_500
snake_case = 128
snake_case = 3
snake_case = 1
snake_case = 5
# tx_args
snake_case = 3E-5
snake_case = 0.0
snake_case = 20_000
snake_case = 0.0_0_9_5
snake_case = "bigbird-roberta-natural-questions"
snake_case = "training-expt"
snake_case = "data/nq-training.jsonl"
snake_case = "data/nq-validation.jsonl"
def lowerCamelCase__ ( self : str ) -> Tuple:
'''simple docstring'''
os.makedirs(self.base_dir , exist_ok=__snake_case )
lowerCamelCase = os.path.join(self.base_dir , self.save_dir )
lowerCamelCase = self.batch_size_per_device * jax.device_count()
@dataclass
class lowerCAmelCase :
'''simple docstring'''
snake_case = 42
snake_case = 4_096 # no dynamic padding on TPUs
def __call__( self : Optional[int] , __snake_case : Any ) -> int:
'''simple docstring'''
        batch = self.collate_fn(__snake_case )
        batch = jax.tree_util.tree_map(shard , batch )
return batch
    def collate_fn( self : Dict , features : Any ) -> Any:
        '''simple docstring'''
        input_ids , attention_mask = self.fetch_inputs(features['input_ids'] )
        batch = {
            'input_ids': jnp.array(input_ids , dtype=jnp.int32 ),
            'attention_mask': jnp.array(attention_mask , dtype=jnp.int32 ),
            'start_labels': jnp.array(features['start_token'] , dtype=jnp.int32 ),
            'end_labels': jnp.array(features['end_token'] , dtype=jnp.int32 ),
            'pooled_labels': jnp.array(features['category'] , dtype=jnp.int32 ),
        }
        return batch
    def fetch_inputs( self : Optional[int] , input_ids : list ) -> str:
        '''simple docstring'''
        inputs = [self._fetch_inputs(ids ) for ids in input_ids]
        return zip(*inputs )
    def _fetch_inputs( self : Tuple , input_ids : list ) -> int:
        '''simple docstring'''
        attention_mask = [1 for _ in range(len(input_ids ) )]
        while len(input_ids ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
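# (Added) Toy illustration of the fixed-length padding above (max_length is
# 4_096 on TPUs; a shorter max_length of 6 and pad_id=0 are used here for brevity):
#
#     _fetch_inputs([5, 5, 5])  ->  input_ids      [5, 5, 5, 0, 0, 0]
#                                   attention_mask [1, 1, 1, 0, 0, 0]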
def get_batched_dataset ( dataset : Optional[Any] , batch_size : Any , seed : List[Any]=None ) -> Any:
    """simple docstring"""
    if seed is not None:
        dataset = dataset.shuffle(seed=seed )
    for i in range(len(dataset ) // batch_size ):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch )
@partial(jax.pmap , axis_name='batch' )
def a_ ( state : Optional[int] , drp_rng : Dict , **model_inputs : int ) -> Union[str, Any]:
    """simple docstring"""
    def loss_fn(params : Optional[int] ):
        start_labels = model_inputs.pop('start_labels' )
        end_labels = model_inputs.pop('end_labels' )
        pooled_labels = model_inputs.pop('pooled_labels' )
        outputs = state.apply_fn(**model_inputs , params=params , dropout_rng=drp_rng , train=True )
        start_logits , end_logits , pooled_logits = outputs
        return state.loss_fn(
            start_logits , start_labels , end_logits , end_labels , pooled_logits , pooled_labels , )
    drp_rng , new_drp_rng = jax.random.split(drp_rng )
    grad_fn = jax.value_and_grad(loss_fn )
    loss , grad = grad_fn(state.params )
    metrics = jax.lax.pmean({'loss': loss} , axis_name='batch' )
    grad = jax.lax.pmean(grad , 'batch' )
    state = state.apply_gradients(grads=grad )
    return state, metrics, new_drp_rng
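# (Added) Minimal sketch of the `jax.pmap` + `jax.lax.pmean` pattern used by the
# train/eval steps above; `device_mean` is illustrative, not part of the trainer:
#
#     @partial(jax.pmap, axis_name='batch')
#     def device_mean(x):
#         return jax.lax.pmean(x, axis_name='batch')   # average across devices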
@partial(jax.pmap , axis_name='batch' )
def a_ ( state : List[str] , **model_inputs : List[Any] ) -> List[Any]:
    """simple docstring"""
    start_labels = model_inputs.pop('start_labels' )
    end_labels = model_inputs.pop('end_labels' )
    pooled_labels = model_inputs.pop('pooled_labels' )
    outputs = state.apply_fn(**model_inputs , params=state.params , train=False )
    start_logits , end_logits , pooled_logits = outputs
    loss = state.loss_fn(start_logits , start_labels , end_logits , end_labels , pooled_logits , pooled_labels )
    metrics = jax.lax.pmean({'loss': loss} , axis_name='batch' )
    return metrics
class lowerCAmelCase ( train_state.TrainState ):
'''simple docstring'''
snake_case = struct.field(pytree_node=__UpperCamelCase )
@dataclass
class lowerCAmelCase :
'''simple docstring'''
snake_case = 42
snake_case = 42
snake_case = 42
snake_case = 42
snake_case = 42
snake_case = 42
snake_case = None
def lowerCamelCase__ ( self : Any , __snake_case : str , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : Optional[int]=None ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = model.params
lowerCamelCase = TrainState.create(
apply_fn=model.__call__ , params=__snake_case , tx=__snake_case , loss_fn=__snake_case , )
if ckpt_dir is not None:
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = restore_checkpoint(__snake_case , __snake_case )
lowerCamelCase = {
'lr': args.lr,
'init_lr': args.init_lr,
'warmup_steps': args.warmup_steps,
'num_train_steps': num_train_steps,
'weight_decay': args.weight_decay,
}
lowerCamelCase , lowerCamelCase = build_tx(**__snake_case )
lowerCamelCase = train_state.TrainState(
step=__snake_case , apply_fn=model.__call__ , params=__snake_case , tx=__snake_case , opt_state=__snake_case , )
lowerCamelCase = args
lowerCamelCase = data_collator
lowerCamelCase = lr
lowerCamelCase = params
lowerCamelCase = jax_utils.replicate(__snake_case )
return state
def lowerCamelCase__ ( self : int , __snake_case : Any , __snake_case : List[Any] , __snake_case : Any ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase = self.args
lowerCamelCase = len(__snake_case ) // args.batch_size
lowerCamelCase = jax.random.PRNGKey(0 )
lowerCamelCase = jax.random.split(__snake_case , jax.device_count() )
for epoch in range(args.max_epochs ):
            running_loss = jnp.array(0 , dtype=jnp.float32 )
lowerCamelCase = get_batched_dataset(__snake_case , args.batch_size , seed=__snake_case )
            i = 0
for batch in tqdm(__snake_case , total=__snake_case , desc=F'''Running EPOCH-{epoch}''' ):
lowerCamelCase = self.data_collator(__snake_case )
lowerCamelCase , lowerCamelCase , lowerCamelCase = self.train_step_fn(__snake_case , __snake_case , **__snake_case )
running_loss += jax_utils.unreplicate(metrics['loss'] )
i += 1
if i % args.logging_steps == 0:
lowerCamelCase = jax_utils.unreplicate(state.step )
lowerCamelCase = running_loss.item() / i
lowerCamelCase = self.scheduler_fn(state_step - 1 )
lowerCamelCase = self.evaluate(__snake_case , __snake_case )
lowerCamelCase = {
'step': state_step.item(),
'eval_loss': eval_loss.item(),
'tr_loss': tr_loss,
'lr': lr.item(),
}
tqdm.write(str(__snake_case ) )
self.logger.log(__snake_case , commit=__snake_case )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + F'''-e{epoch}-s{i}''' , state=__snake_case )
def lowerCamelCase__ ( self : Tuple , __snake_case : Dict , __snake_case : Tuple ) -> Dict:
'''simple docstring'''
lowerCamelCase = get_batched_dataset(__snake_case , self.args.batch_size )
lowerCamelCase = len(__snake_case ) // self.args.batch_size
        running_loss = jnp.array(0 , dtype=jnp.float32 )
        i = 0
for batch in tqdm(__snake_case , total=__snake_case , desc='Evaluating ... ' ):
lowerCamelCase = self.data_collator(__snake_case )
lowerCamelCase = self.val_step_fn(__snake_case , **__snake_case )
running_loss += jax_utils.unreplicate(metrics['loss'] )
i += 1
return running_loss / i
def lowerCamelCase__ ( self : Optional[int] , __snake_case : List[str] , __snake_case : Tuple ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase = jax_utils.unreplicate(__snake_case )
print(F'''SAVING CHECKPOINT IN {save_dir}''' , end=' ... ' )
self.model_save_fn(__snake_case , params=state.params )
with open(os.path.join(__snake_case , 'opt_state.msgpack' ) , 'wb' ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(__snake_case , 'args.joblib' ) )
joblib.dump(self.data_collator , os.path.join(__snake_case , 'data_collator.joblib' ) )
with open(os.path.join(__snake_case , 'training_state.json' ) , 'w' ) as f:
json.dump({'step': state.step.item()} , __snake_case )
print('DONE' )
def restore_checkpoint ( save_dir : Optional[Any] , state : int ) -> str:
    """simple docstring"""
    print(f'''RESTORING CHECKPOINT FROM {save_dir}''' , end=' ... ' )
    with open(os.path.join(save_dir , 'flax_model.msgpack' ) , 'rb' ) as f:
        params = from_bytes(state.params , f.read() )
    with open(os.path.join(save_dir , 'opt_state.msgpack' ) , 'rb' ) as f:
        opt_state = from_bytes(state.opt_state , f.read() )
    args = joblib.load(os.path.join(save_dir , 'args.joblib' ) )
    data_collator = joblib.load(os.path.join(save_dir , 'data_collator.joblib' ) )
    with open(os.path.join(save_dir , 'training_state.json' ) , 'r' ) as f:
        training_state = json.load(f )
    step = training_state['step']
    print('DONE' )
    return params, opt_state, step, args, data_collator
def scheduler_fn ( lr : str , init_lr : Dict , warmup_steps : Tuple , num_train_steps : Dict ) -> Optional[Any]:
    """simple docstring"""
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr , end_value=lr , transition_steps=warmup_steps )
    decay_fn = optax.linear_schedule(init_value=lr , end_value=1E-7 , transition_steps=decay_steps )
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
    return lr
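# (Added) Worked example of the schedule above, under the dataclass defaults
# (init_lr=0.0, lr=3e-5, warmup_steps=20_000): the joined schedule rises
# linearly from 0.0 to 3e-5 over the first 20_000 steps, then decays linearly
# toward 1e-7 over the remaining `num_train_steps - warmup_steps` steps.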
def build_tx ( lr : Optional[Any] , init_lr : str , warmup_steps : List[str] , num_train_steps : List[Any] , weight_decay : Optional[int] ) -> Optional[int]:
    """simple docstring"""
    def weight_decay_mask(params : List[str] ):
        params = traverse_util.flatten_dict(params )
        # mask is computed from the flattened parameter *paths* (tuple keys), not the arrays
        mask = {k: (k[-1] != 'bias' and k[-2:] != ('LayerNorm', 'scale')) for k in params}
        return traverse_util.unflatten_dict(mask )
    lr = scheduler_fn(lr , init_lr , warmup_steps , num_train_steps )
    tx = optax.adamw(learning_rate=lr , weight_decay=weight_decay , mask=weight_decay_mask )
    return tx, lr
| 246 | 1 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config ( __magic_name__ :str , display :bool=False ):
    config = OmegaConf.load(__magic_name__ )
    if display:
        print(yaml.dump(OmegaConf.to_container(config ) ) )
    return config
def load_vqgan ( device , conf_path=None , ckpt_path=None ):  # name reconstructed from context; the original was mangled
    if conf_path is None:
        conf_path = '''./model_checkpoints/vqgan_only.yaml'''
    config = load_config(conf_path , display=False )
    model = VQModel(**config.model.params )
    if ckpt_path is None:
        ckpt_path = '''./model_checkpoints/vqgan_only.pt'''
    sd = torch.load(ckpt_path , map_location=device )
    if ".ckpt" in ckpt_path:
        sd = sd['''state_dict''']
    model.load_state_dict(sd , strict=False )
    model.to(device )
    del sd
    return model
def reconstruct_with_vqgan ( x , model ):  # name reconstructed from context; the original was mangled
    z , _ , _ = model.encode(x )
    print(F'''VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}''' )
    xrec = model.decode(z )
    return xrec
def get_obj_from_str ( string , reload=False ):
    module , cls = string.rsplit('''.''' , 1 )
    if reload:
        module_imp = importlib.import_module(module )
        importlib.reload(module_imp )
    return getattr(importlib.import_module(module , package=None ) , cls )
def instantiate_from_config ( config ):
    if "target" not in config:
        raise KeyError('''Expected key `target` to instantiate.''' )
    return get_obj_from_str(config['''target'''] )(**config.get('''params''' , {} ) )
def load_model_from_config ( config , sd , gpu=True , eval_mode=True ):
    model = instantiate_from_config(config )
    if sd is not None:
        model.load_state_dict(sd )
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model ( config , ckpt , gpu , eval_mode ):  # name reconstructed from context; the original was mangled
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt , map_location='''cpu''' )
        global_step = pl_sd['''global_step''']
        print(F'''loaded model from global step {global_step}.''' )
    else:
        pl_sd = {'''state_dict''': None}
        global_step = None
    model = load_model_from_config(config.model , pl_sd['''state_dict'''] , gpu=gpu , eval_mode=eval_mode )['''model''']
    return model, global_step
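# (Added) Hedged usage sketch for the helpers above; the image tensor is a
# placeholder, and `load_vqgan`/`reconstruct_with_vqgan` are reconstructed
# names (the originals were mangled in this file):
#
#     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#     vqgan = load_vqgan(device)              # uses the default yaml/pt paths
#     # x: a [1, 3, H, W] image tensor scaled the way the model expects
#     # xrec = reconstruct_with_vqgan(x.to(device), vqgan)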
| 407 |
from functools import lru_cache
@lru_cache
def factorial ( num :int ):
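    """Return num! computed recursively and memoized by lru_cache.

    >>> factorial(5)
    120
    >>> factorial(0)
    1
    """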
if num < 0:
raise ValueError('''Number should not be negative.''' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 407 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase = {
"configuration_x_clip": [
"XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XCLIPConfig",
"XCLIPTextConfig",
"XCLIPVisionConfig",
],
"processing_x_clip": ["XCLIPProcessor"],
}
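# (Added note) `_import_structure` above feeds the `_LazyModule` assignment at
# the bottom of this file: heavy submodules are imported only when one of the
# listed attributes is first accessed, e.g.
#
#     from transformers import XCLIPProcessor   # resolved lazily on first use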
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"XCLIPModel",
"XCLIPPreTrainedModel",
"XCLIPTextModel",
"XCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 45 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
UpperCamelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class lowerCAmelCase_ :
"""simple docstring"""
_snake_case : Optional[str] = field(
default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """The column name of the images in the files."""} )
_snake_case : Optional[str] = field(default=lowercase , metadata={"""help""": """A folder containing the training data."""} )
_snake_case : Optional[str] = field(default=lowercase , metadata={"""help""": """A folder containing the validation data."""} )
_snake_case : Optional[float] = field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
_snake_case : Optional[int] = field(
default=lowercase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
_snake_case : Optional[int] = field(
default=lowercase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def __a ( self :List[str] ):
UpperCamelCase__ :Optional[Any] = {}
if self.train_dir is not None:
UpperCamelCase__ :int = self.train_dir
if self.validation_dir is not None:
UpperCamelCase__ :List[str] = self.validation_dir
UpperCamelCase__ :Optional[int] = data_files if data_files else None
@dataclass
class lowerCAmelCase_ :
"""simple docstring"""
_snake_case : str = field(
default=lowercase , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} )
_snake_case : Optional[str] = field(
default=lowercase , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
_snake_case : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
_snake_case : str = field(default=lowercase , metadata={"""help""": """Name or path of preprocessor config."""} )
_snake_case : bool = field(
default=lowercase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
_snake_case : float = field(
default=0.75 , metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} )
_snake_case : bool = field(
default=lowercase , metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} )
@dataclass
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : float = field(
default=1e-3 , metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} )
def collate_fn ( examples : Union[str, Any] ) -> Dict:  # name reconstructed; the original was mangled to `A`
    pixel_values = torch.stack([example["""pixel_values"""] for example in examples] )
    return {"pixel_values": pixel_values}
def main ( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mae""" , lowercase__ , lowercase__ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
    ds = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
UpperCamelCase__ :int = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , lowercase__ ) and data_args.train_val_split > 0.0:
UpperCamelCase__ :Optional[Any] = ds["""train"""].train_test_split(data_args.train_val_split )
UpperCamelCase__ :Union[str, Any] = split["""train"""]
UpperCamelCase__ :Any = split["""test"""]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name , **config_kwargs )
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config = ViTMAEConfig()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(f"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(f"""New config: {config}""" )
# adapt config
config.update(
{
"""mask_ratio""": model_args.mask_ratio,
"""norm_pix_loss""": model_args.norm_pix_loss,
} )
# create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **config_kwargs )
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        image_processor = ViTImageProcessor()
# create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info("""Training new model from scratch""" )
        model = ViTMAEForPreTraining(config )
    if training_args.do_train:
        column_names = ds["""train"""].column_names
    else:
        column_names = ds["""validation"""].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = """image"""
    elif "img" in column_names:
        image_column_name = """img"""
    else:
        image_column_name = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
UpperCamelCase__ :List[str] = image_processor.size["""shortest_edge"""]
else:
UpperCamelCase__ :int = (image_processor.size["""height"""], image_processor.size["""width"""])
UpperCamelCase__ :Any = Compose(
[
Lambda(lambda lowercase__ : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(lowercase__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
    def preprocess_images(examples : Tuple ):
        examples["pixel_values"] = [transforms(image ) for image in examples[image_column_name]]
        return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
UpperCamelCase__ :Optional[int] = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(lowercase__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
            ds["""validation"""] = (
                ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
# Set the validation transforms
ds["validation"].set_transform(lowercase__ )
# Compute absolute learning rate
    total_train_batch_size = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
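        # (Added) Worked example of the linear scaling rule above: with the
        # default base_learning_rate of 1e-3 and total_train_batch_size = 512,
        # learning_rate = 1e-3 * 512 / 256 = 2e-3.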
# Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=image_processor , data_collator=collate_fn , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("""eval""" , metrics )
        trainer.save_metrics("""eval""" , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
"""tasks""": """masked-auto-encoding""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-auto-encoding"""],
}
if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
else:
        trainer.create_model_card(**kwargs )
def _mp_fn ( index : Union[str, Any] ) -> Dict:  # name assumed (xla_spawn convention); the original was mangled
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
    main()
| 45 | 1 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
SCREAMING_SNAKE_CASE = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
SCREAMING_SNAKE_CASE = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
SCREAMING_SNAKE_CASE = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
SCREAMING_SNAKE_CASE = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
SCREAMING_SNAKE_CASE = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
SCREAMING_SNAKE_CASE = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
SCREAMING_SNAKE_CASE = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
SCREAMING_SNAKE_CASE = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class _lowerCamelCase (__SCREAMING_SNAKE_CASE ):
_snake_case = VOCAB_FILES_NAMES
_snake_case = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_snake_case = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
_snake_case = DPRContextEncoderTokenizer
class _lowerCamelCase (__SCREAMING_SNAKE_CASE ):
_snake_case = VOCAB_FILES_NAMES
_snake_case = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_snake_case = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_snake_case = DPRQuestionEncoderTokenizer
SCREAMING_SNAKE_CASE = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
SCREAMING_SNAKE_CASE = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
SCREAMING_SNAKE_CASE = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(__SCREAMING_SNAKE_CASE )
class _lowerCamelCase :
    def __call__( self : Any , questions : List[str] , titles : Optional[str] = None , texts : Optional[str] = None , padding : Union[bool, str] = False , truncation : Union[bool, str] = False , max_length : Optional[int] = None , return_tensors : Optional[Union[str, TensorType]] = None , return_attention_mask : Optional[bool] = None , **kwargs : Tuple , ):
        """simple docstring"""
        if titles is None and texts is None:
            return super().__call__(
                questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        titles = titles if not isinstance(titles , str ) else [titles]
        texts = texts if not isinstance(texts , str ) else [texts]
        n_passages = len(titles )
        questions = questions if not isinstance(questions , str ) else [questions] * n_passages
        assert len(titles ) == len(
            texts ), F'''There should be as many titles as texts but got {len(titles )} titles and {len(texts )} texts.'''
        encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False )["""input_ids"""]
        encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False )["""input_ids"""]
        encoded_inputs = {
            """input_ids""": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts )
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            encoded_inputs["""attention_mask"""] = attention_mask
        return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors )
    def decode_best_spans( self : Optional[int] , reader_input : BatchEncoding , reader_output : DPRReaderOutput , num_spans : int = 1_6 , max_answer_length : int = 6_4 , num_spans_per_passage : int = 4 , ):
        """simple docstring"""
        input_ids = reader_input["""input_ids"""]
        start_logits , end_logits , relevance_logits = reader_output[:3]
        n_docs = len(relevance_logits )
        sorted_docs = sorted(range(n_docs ) , reverse=True , key=relevance_logits.__getitem__ )
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id , 2 ) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id )
            else:
                sequence_len = len(sequence_ids )
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
                if len(nbest_spans_predictions ) >= num_spans:
                    break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self : Optional[int] , start_logits : List[int] , end_logits : List[int] , max_answer_length : int , top_spans : int , ):
        """simple docstring"""
        scores = []
        for start_index, start_score in enumerate(start_logits ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        scores = sorted(scores , key=lambda x : x[1] , reverse=True )
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, F'''Wrong span indices: [{start_index}:{end_index}]'''
            length = end_index - start_index + 1
            assert length <= max_answer_length, F'''Span is too long: {length} > {max_answer_length}'''
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(chosen_span_intervals ) == top_spans:
                break
        return chosen_span_intervals
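# (Added) Toy walk-through of `_get_best_spans` above, with hypothetical
# logits: every (start, end) pair within `max_answer_length` is scored as
# start_logit + end_logit, and spans overlapping earlier picks are skipped.
#
#     start_logits = [0.1, 2.0, 0.3]
#     end_logits   = [0.2, 0.1, 1.5]
#     # top span is (1, 2) with score 2.0 + 1.5 = 3.5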
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class _lowerCamelCase (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
_snake_case = VOCAB_FILES_NAMES
_snake_case = READER_PRETRAINED_VOCAB_FILES_MAP
_snake_case = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = READER_PRETRAINED_INIT_CONFIGURATION
_snake_case = ["input_ids", "attention_mask"]
    _snake_case = DPRReaderTokenizer
| 705 |
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowerCamelCase (__lowerCamelCase ):
_snake_case = ""
_snake_case = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
_snake_case = None # compression type in fsspec. ex: "gzip"
_snake_case = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self : str , lowerCamelCase_ : str = "" , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : Optional[dict] = None , **lowerCamelCase_ : List[str] ):
"""simple docstring"""
super().__init__(self , **lowerCamelCase_ )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
_lowercase : Union[str, Any] = fsspec.open(
lowerCamelCase_ , mode='rb' , protocol=lowerCamelCase_ , compression=self.compression , client_kwargs={
'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459
'trust_env': True, # Enable reading proxy env variables.
**(target_options or {}).pop('client_kwargs' , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
_lowercase : str = os.path.basename(self.file.path.split('::' )[0] )
_lowercase : Optional[int] = (
self.compressed_name[: self.compressed_name.rindex('.' )]
if '.' in self.compressed_name
else self.compressed_name
)
_lowercase : str = None
@classmethod
    def _strip_protocol( cls : int , path : List[str] ):
        """simple docstring"""
        return super()._strip_protocol(path ).lstrip('/' )
    def _get_dirs( self : Optional[Any] ):
        """simple docstring"""
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name}
            self.dir_cache = {f['name']: f}
def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : str ):
"""simple docstring"""
return self.file.open().read()
def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : str , lowerCamelCase_ : str = "rb" , lowerCamelCase_ : str=None , lowerCamelCase_ : str=True , lowerCamelCase_ : Union[str, Any]=None , **lowerCamelCase_ : Optional[int] , ):
"""simple docstring"""
_lowercase : Union[str, Any] = self._strip_protocol(lowerCamelCase_ )
if mode != "rb":
raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
return self.file.open()
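# (Added) Hedged usage sketch: fsspec resolves the compression filesystems
# below through its chained-URL syntax, so a gzip-compressed remote file can be
# read transparently (the URL is a placeholder):
#
#     import fsspec
#     with fsspec.open('gzip://data.txt::https://example.com/data.txt.gz', mode='rb') as f:
#         raw = f.read()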
class _lowerCamelCase (__lowerCamelCase ):
_snake_case = "bz2"
_snake_case = "bz2"
_snake_case = ".bz2"
class _lowerCamelCase (__lowerCamelCase ):
_snake_case = "gzip"
_snake_case = "gzip"
_snake_case = ".gz"
class _lowerCamelCase (__lowerCamelCase ):
_snake_case = "lz4"
_snake_case = "lz4"
_snake_case = ".lz4"
class _lowerCamelCase (__lowerCamelCase ):
_snake_case = "xz"
_snake_case = "xz"
_snake_case = ".xz"
class _lowerCamelCase (__lowerCamelCase ):
_snake_case = "zstd"
_snake_case = "zstd"
_snake_case = ".zst"
def __init__( self : Union[str, Any] , lowerCamelCase_ : str , lowerCamelCase_ : str = "rb" , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : Optional[dict] = None , lowerCamelCase_ : int = DEFAULT_BLOCK_SIZE , **lowerCamelCase_ : int , ):
"""simple docstring"""
super().__init__(
fo=lowerCamelCase_ , mode=lowerCamelCase_ , target_protocol=lowerCamelCase_ , target_options=lowerCamelCase_ , block_size=lowerCamelCase_ , **lowerCamelCase_ , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__
class _lowerCamelCase :
            def __init__( self : Any , file_ : List[Any] ):
                """simple docstring"""
                self._file = file_
def __enter__( self : str ):
"""simple docstring"""
self._file.__enter__()
return self
def __exit__( self : Union[str, Any] , *lowerCamelCase_ : Optional[Any] , **lowerCamelCase_ : Union[str, Any] ):
"""simple docstring"""
self._file.__exit__(*lowerCamelCase_ , **lowerCamelCase_ )
def __iter__( self : Optional[int] ):
"""simple docstring"""
return iter(self._file )
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
return next(self._file )
def __getattr__( self : List[str] , lowerCamelCase_ : str ):
"""simple docstring"""
return getattr(self._file , lowerCamelCase_ )
def fixed_enter(*lowerCamelCase_ : List[Any] , **lowerCamelCase_ : int ):
return WrappedFile(_enter(*lowerCamelCase_ , **lowerCamelCase_ ) )
        self.file.__enter__ = fixed_enter
| 283 | 0 |
"""simple docstring"""
from __future__ import annotations
def p_series ( nth_term , power ):
    """simple docstring"""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term )
    power = int(power )
    series = []
    for temp in range(int(nth_term ) ):
        series.append(f'''1 / {pow(temp + 1 ,int(power ) )}''' if series else """1""" )
return series
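# (Added) Example for the reconstructed function above:
#
#     >>> p_series(5, 2)
#     ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']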
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ = int(input("""Enter the last number (nth term) of the P-Series"""))
UpperCAmelCase__ = int(input("""Enter the power for P-Series"""))
print("""Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p""")
print(p_series(nth_term, power))
| 277 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
UpperCamelCase__ : str = get_tests_dir('fixtures')
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase__ : Any = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
# Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('''requests.Session.request''' ,return_value=response_mock ) as mock_head:
UpperCAmelCase__ : Tuple = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase__ : Dict = ViTImageProcessor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' )
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
with self.assertRaises(lowerCamelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
UpperCAmelCase__ : int = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' )
UpperCAmelCase__ : Optional[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/stable-diffusion-all-variants''' ,subfolder='''feature_extractor''' )
self.assertIsNotNone(lowerCamelCase_ )
@is_staging_test
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def lowerCAmelCase__ ( cls ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def lowerCAmelCase__ ( cls ) -> List[str]:
'''simple docstring'''
try:
delete_repo(token=cls._token ,repo_id='''test-image-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='''valid_org/test-image-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='''test-dynamic-image-processor''' )
except HTTPError:
pass
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = ViTImageProcessor.from_pretrained(lowerCamelCase_ )
image_processor.push_to_hub('''test-image-processor''' ,use_auth_token=self._token )
UpperCAmelCase__ : Any = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(lowerCamelCase_ ,getattr(lowerCamelCase_ ,lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token ,repo_id='''test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
lowerCamelCase_ ,repo_id='''test-image-processor''' ,push_to_hub=lowerCamelCase_ ,use_auth_token=self._token )
UpperCAmelCase__ : List[Any] = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(lowerCamelCase_ ,getattr(lowerCamelCase_ ,lowerCamelCase_ ) )
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = ViTImageProcessor.from_pretrained(lowerCamelCase_ )
image_processor.push_to_hub('''valid_org/test-image-processor''' ,use_auth_token=self._token )
UpperCAmelCase__ : Any = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(lowerCamelCase_ ,getattr(lowerCamelCase_ ,lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token ,repo_id='''valid_org/test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir ,repo_id='''valid_org/test-image-processor-org''' ,push_to_hub=True ,use_auth_token=self._token )
            new_image_processor = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' )
            for k, v in image_processor.__dict__.items():
                self.assertEqual(v ,getattr(new_image_processor ,k ) )
    def test_push_to_hub_dynamic_image_processor(self ) -> str:
'''simple docstring'''
CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
image_processor.push_to_hub('''test-dynamic-image-processor''' ,use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map ,{'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} ,)
        new_image_processor = AutoImageProcessor.from_pretrained(
            f'''{USER}/test-dynamic-image-processor''' ,trust_remote_code=True )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ ,'''CustomImageProcessor''' )
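        # For reference, a hedged sketch (keys beyond `auto_map` are assumptions,
        # not verified against the hub) of what the pushed preprocessor_config.json
        # contains after `register_for_auto_class()`, which is what the
        # assertDictEqual above exercises:
        # {
        #     "auto_map": {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        #     "image_processor_type": "CustomImageProcessor",
        #     ...
        # }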
| 614 | 0 |
'''simple docstring'''
def binary_and(a: int , b: int ) -> str:
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''' )
    a_binary = str(bin(a ) )[2:] # remove the leading "0b"
    b_binary = str(bin(b ) )[2:] # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == '''1''' and char_b == '''1''' ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
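    # A quick hedged sanity check of binary_and as fixed above:
    # 25 = 0b11001 and 32 = 0b100000 share no set bits, while 37 & 50 = 32.
    assert binary_and(25, 32) == "0b000000"
    assert binary_and(37, 50) == "0b100000"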
| 56 |
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    '''simple docstring'''
    model_name_or_path: Optional[str] = field(
        default=None , metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        } , )
    model_type: Optional[str] = field(
        default=None , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES )} , )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class DataTrainingArguments:
    '''simple docstring'''
    train_data_file: Optional[str] = field(
        default=None , metadata={"help": "The input training data file (a text file)."} )
    train_data_files: Optional[str] = field(
        default=None , metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        } , )
    eval_data_file: Optional[str] = field(
        default=None , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
    train_ref_file: Optional[str] = field(
        default=None , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , )
    eval_ref_file: Optional[str] = field(
        default=None , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , )
    line_by_line: bool = field(
        default=False , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , )
    mlm: bool = field(
        default=False , metadata={"help": "Train with masked-language modeling loss instead of language modeling."} )
    whole_word_mask: bool = field(default=False , metadata={"help": "Whether or not to use whole word mask."} )
    mlm_probability: float = field(
        default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
    plm_probability: float = field(
        default=1 / 6 , metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        } , )
    max_span_length: int = field(
        default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} )
    block_size: int = field(
        default=-1 , metadata={
            "help": (
                "Optional input sequence length after tokenization. "
                "The training dataset will be truncated in block of this size for training. "
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def get_dataset(
    args: DataTrainingArguments , tokenizer: PreTrainedTokenizer , evaluate: bool = False , cache_dir: Optional[str] = None , ):
    def _dataset(file_path: str , ref_path=None ):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError('''You need to set whole word masking and mlm to True for Chinese Whole Word Mask''' )
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , ref_path=ref_path , )
            return LineByLineTextDataset(tokenizer=tokenizer , file_path=file_path , block_size=args.block_size )
        else:
            return TextDataset(
                tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=cache_dir , )
    if evaluate:
        return _dataset(args.eval_data_file , args.eval_ref_file )
    elif args.train_data_files:
        return ConcatDataset([_dataset(f ) for f in glob(args.train_data_files )] )
    else:
        return _dataset(args.train_data_file , args.train_ref_file )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            '''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
            '''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , training_args )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('''You are instantiating a new config instance from scratch.''' )
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        raise ValueError(
            '''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
            ''' script, save it, and load it from here, using --tokenizer_name''' )
    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    else:
        logger.info('''Training new model from scratch''' )
        model = AutoModelWithLMHead.from_config(config )
    model.resize_token_embeddings(len(tokenizer ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
'''--mlm flag (masked language modeling).''' )
    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size , tokenizer.max_len )
# Get datasets
    train_dataset = (
        get_dataset(data_args , tokenizer=tokenizer , cache_dir=model_args.cache_dir ) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args , tokenizer=tokenizer , evaluate=True , cache_dir=model_args.cache_dir )
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer , mlm_probability=data_args.mlm_probability )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , data_collator=data_collator , train_dataset=train_dataset , eval_dataset=eval_dataset , prediction_loss_only=True , )
# Training
if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
            else None
        )
        trainer.train(model_path=model_path )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output['''eval_loss'''] )
        result = {'''perplexity''': perplexity}
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
        if trainer.is_world_master():
            with open(output_eval_file , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key in sorted(result.keys() ):
                    logger.info(''' %s = %s''' , key , str(result[key] ) )
                    writer.write('''%s = %s\n''' % (key, str(result[key] )) )
        results.update(result )
    return results
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
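    # A hedged example invocation (file names and paths are placeholders,
    # not real data; the script name is assumed):
    # python run_language_modeling.py \
    #     --model_name_or_path gpt2 \
    #     --train_data_file ./train.txt \
    #     --eval_data_file ./eval.txt \
    #     --do_train --do_eval \
    #     --output_dir ./lm-finetuned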
| 56 | 1 |
def simplify(current_set: list[list] ) -> list[list]:
    # Divide each row by the magnitude of its first term --> creates a 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set ):
        magnitude = row[0]
        for column_index, column in enumerate(row ):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row )
            continue
        for column_index in range(len(row ) ):
            temp_row.append(first_row[column_index] - row[column_index] )
        final_set.append(temp_row )
    # Create next recursion iteration set
    if len(final_set[0] ) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0] )
            next_iteration.append(row[1::] )
        resultant = simplify(next_iteration )
        for i in range(len(resultant ) ):
            resultant[i].insert(0 , current_first_column[i] )
        resultant.insert(0 , current_first_row )
        final_set = resultant
    return final_set
def solve_simultaneous(equations: list[list] ) -> list:
    if len(equations ) == 0:
        raise IndexError('solve_simultaneous() requires n lists of length n+1' )
    _length = len(equations ) + 1
    if any(len(item ) != _length for item in equations ):
        raise IndexError('solve_simultaneous() requires n lists of length n+1' )
    for row in equations:
        if any(not isinstance(column , (int, float) ) for column in row ):
            raise ValueError('solve_simultaneous() requires lists of integers' )
    if len(equations ) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set ):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data ):
            if 0 not in row:
                full_row = data_set.pop(row_index )
                break
        if not full_row:
            raise ValueError('solve_simultaneous() requires at least 1 full equation' )
        data_set.insert(0 , full_row )
    useable_form = data_set.copy()
    simplified = simplify(useable_form )
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0 )
                continue
            solutions.append(current_solution / row[-2] )
            continue
        temp_row = row.copy()[: len(row ) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0 )
        if len(temp_row ) == 0:
            solutions.append(0 )
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row ):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution )
    final = []
    for item in solutions:
        final.append(float(round(item , 5 ) ) )
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
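    # Hedged cross-check of the demo above: the 5x5 system is (I + J) x = b with
    # J the all-ones matrix, so x_i = b_i - sum(x) and sum(x) = 5, giving
    # [-1.0, 0.0, 1.0, 2.0, 3.0]; the single equation [[4, 2]] means 4x = 2 -> [0.5].
    import numpy as np  # assumed available, used only for this verification

    a = np.array(eq, dtype=float)
    assert np.allclose(np.linalg.solve(a[:, :-1], a[:, -1]), [-1.0, 0.0, 1.0, 2.0, 3.0])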
| 345 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
lowerCAmelCase__: Optional[int] = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
lowerCAmelCase__: Any = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
lowerCAmelCase__: Tuple = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def get_coref_infos(
    key_lines , sys_lines , NP_only=False , remove_nested=False , keep_singletons=True , min_span=False , doc="dummy_doc" ):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters , singletons_num = reader.get_doc_mentions(doc , key_doc_lines[doc] , keep_singletons )
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters , key_doc_lines[doc] , NP_only , min_span )
    sys_clusters , singletons_num = reader.get_doc_mentions(doc , sys_doc_lines[doc] , keep_singletons )
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters , key_doc_lines[doc] , NP_only , min_span )
    if remove_nested:
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(key_clusters , keep_singletons )
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(sys_clusters , keep_singletons )
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters , key_clusters )
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters , sys_clusters )
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            'Number of removed nested coreferring mentions in the key '
            f'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' )
        logger.info(
            'Number of resulting singleton clusters in the key '
            f'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' )
    if not keep_singletons:
        logger.info(
            f'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
            'files, respectively' )
    return doc_coref_infos
def evaluate(key_lines , sys_lines , metrics , NP_only , remove_nested , keep_singletons , min_span ):
    doc_coref_infos = get_coref_infos(key_lines , sys_lines , NP_only , remove_nested , keep_singletons , min_span )
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall , precision , f1 = evaluator.evaluate_documents(doc_coref_infos , metric , beta=1 )
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f'{name}/recall': recall, f'{name}/precision': precision, f'{name}/f1': f1} )
        logger.info(
            name.ljust(10 ) , f'Recall: {recall * 100:.2f}' , f' Precision: {precision * 100:.2f}' , f' F1: {f1 * 100:.2f}' , )
    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f'CoNLL score: {conll:.2f}' )
        output_scores.update({'conll_score': conll} )
    return output_scores
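# A hedged illustration (values are made up) of how `conll_score` above is
# formed: it is simply the arithmetic mean of the MUC, B-cubed and CEAFe F1
# values, scaled to a percentage.
muc_f1, bcub_f1, ceafe_f1 = 0.80, 0.75, 0.70  # hypothetical F1 values
conll_score_example = (muc_f1 + bcub_f1 + ceafe_f1) / 3 * 100
assert abs(conll_score_example - 75.0) < 1e-9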
def check_gold_parse_annotation(key_lines ) -> bool:
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith('#' ):
            if len(line.split() ) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Sequence(datasets.Value('string' ) ),
} ) , codebase_urls=['https://github.com/ns-moosavi/coval'] , reference_urls=[
'https://github.com/ns-moosavi/coval',
'https://www.aclweb.org/anthology/P16-1060',
'http://www.conll.cemantix.org/2012/data.html',
] , )
    def _compute(
        self , predictions , references , keep_singletons=True , NP_only=False , min_span=False , remove_nested=False ):
        allmetrics = [
('mentions', evaluator.mentions),
('muc', evaluator.muc),
('bcub', evaluator.b_cubed),
('ceafe', evaluator.ceafe),
('lea', evaluator.lea),
]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references )
            if not has_gold_parse:
                raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.' )
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references , sys_lines=predictions , metrics=allmetrics , NP_only=NP_only , remove_nested=remove_nested , keep_singletons=keep_singletons , min_span=min_span , )
return score
| 345 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = ShapEPipeline
    params = ['prompt']
    batch_params = ['prompt']
    required_optional_params = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
    test_gradient_checkpointing = False
    @property
    def text_embedder_hidden_size(self ):
        return 32
    @property
    def time_input_dim(self ):
        return 32
    @property
    def time_embed_dim(self ):
        return self.time_input_dim * 4
    @property
    def renderer_dim(self ):
        return 8
@property
    def dummy_tokenizer(self ):
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
    def dummy_text_encoder(self ):
torch.manual_seed(0 )
        config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModelWithProjection(config )
@property
    def dummy_prior(self ):
        torch.manual_seed(0 )
        model_kwargs = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
        model = PriorTransformer(**model_kwargs )
return model
@property
    def dummy_renderer(self ):
        torch.manual_seed(0 )
        model_kwargs = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
        model = ShapERenderer(**model_kwargs )
return model
    def get_dummy_components(self ):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
        components = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
    def get_dummy_inputs(self , device , seed=0 ):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
    def test_shap_e(self ):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_inference_batch_consistent(self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical(self ):
        test_max_difference = torch_device == '''cpu'''
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )
    def test_num_images_per_prompt(self ):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device )
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs , num_images_per_prompt=num_images_per_prompt )[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase ):
'''simple docstring'''
    def tearDown(self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_shap_e(self ):
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/shap_e/test_shap_e_np_out.npy''' )
        pipe = ShapEPipeline.from_pretrained('''openai/shap-e''' )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        images = pipe(
            '''a shark''' , generator=generator , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images , expected_image )
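        # A hedged follow-up sketch (assumes the Pillow package): the
        # (20, 64, 64, 3) array holds 20 rendered frames, which could be
        # stitched into an animated GIF, e.g.:
        #
        #     from PIL import Image
        #     frames = [Image.fromarray((f * 255).astype(np.uint8)) for f in images]
        #     frames[0].save("shark.gif", save_all=True, append_images=frames[1:],
        #                    duration=100, loop=0)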
| 714 |
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
'''simple docstring'''
    def __init__( self , parent , ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01
    def prepare_config_and_inputs(self ):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
        return (config, input_ids_1, input_ids_2, lm_labels)
    def set_seed(self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
    def create_and_check_transfo_xl_model(self , config , input_ids_1 , input_ids_2 , lm_labels ):
        model = TFTransfoXLModel(config )
        hidden_states_1 , mems_1 = model(input_ids_1 ).to_tuple()
        inputs = {'''input_ids''': input_ids_2, '''mems''': mems_1}
        hidden_states_2 , mems_2 = model(inputs ).to_tuple()
        self.parent.assertEqual(hidden_states_1.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(hidden_states_2.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
    def create_and_check_transfo_xl_lm_head(self , config , input_ids_1 , input_ids_2 , lm_labels ):
        model = TFTransfoXLLMHeadModel(config )
        lm_logits_1 , mems_1 = model(input_ids_1 ).to_tuple()
        inputs = {'''input_ids''': input_ids_1, '''labels''': lm_labels}
        _ , mems_1 = model(inputs ).to_tuple()
        lm_logits_2 , mems_2 = model([input_ids_2, mems_1] ).to_tuple()
        inputs = {'''input_ids''': input_ids_2, '''mems''': mems_1, '''labels''': lm_labels}
        _ , mems_2 = model(inputs ).to_tuple()
        self.parent.assertEqual(lm_logits_1.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
        self.parent.assertEqual(lm_logits_2.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
    def create_and_check_transfo_xl_for_sequence_classification(self , config , input_ids_1 , input_ids_2 , lm_labels ):
        model = TFTransfoXLForSequenceClassification(config )
        result = model(input_ids_1 )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config , input_ids_1 , input_ids_2 , lm_labels) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
{
"""feature-extraction""": TFTransfoXLModel,
"""text-classification""": TFTransfoXLForSequenceClassification,
"""text-generation""": TFTransfoXLLMHeadModel,
"""zero-shot""": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
    def setUp(self ):
        self.model_tester = TFTransfoXLModelTester(self )
        self.config_tester = ConfigTester(self , config_class=TransfoXLConfig , d_embed=37 )
    def test_config(self ):
self.config_tester.run_common_tests()
    def test_transfo_xl_model(self ):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs )
    def test_transfo_xl_lm_head(self ):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs )
    def test_transfo_xl_sequence_classification_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs )
    def test_model_common_attributes(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x , tf.keras.layers.Layer )
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
    def test_xla_mode(self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
    def test_model_from_pretrained(self ):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' )
    def test_dataset_conversion(self ):
pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase ):
'''simple docstring'''
@unittest.skip('''Skip test until #12651 is resolved.''' )
@slow
    def test_lm_generate_transfo_xl_wt103(self ):
        model = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
# fmt: off
        input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]] , dtype=tf.int32 ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids , max_length=200 , do_sample=False )
        self.assertListEqual(output_ids[0].numpy().tolist() , expected_output_ids )
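        # A hedged sketch of the memory mechanism this test exercises: the
        # `mems` returned by one forward pass can be fed into the next call so
        # the model attends beyond the current segment, e.g.:
        #
        #     first = model(input_ids)
        #     second = model(input_ids, mems=first.mems)  # reuses cached states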
| 441 | 0 |
import torch
from diffusers import DiffusionPipeline
class CustomPipeline(DiffusionPipeline ):
    def __init__( self , unet , scheduler )-> None:
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
    def __call__( self )-> Any:
        """simple docstring"""
        # Draw one random sample shaped like the UNet's expected input.
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
        timestep = 1
        model_output = self.unet(image , timestep ).sample
        scheduler_output = self.scheduler.step(model_output , timestep , image ).prev_sample
        # The difference cancels to zero, so the pipeline deterministically returns ones.
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output )
        return result
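# A hedged usage sketch: any hub repo that provides compatible `unet` and
# `scheduler` components could be loaded here; "google/ddpm-cifar10-32" is
# only an assumed example checkpoint.
#
#     pipeline = CustomPipeline.from_pretrained("google/ddpm-cifar10-32")
#     result = pipeline()  # tensor of ones shaped like one sample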
| 85 |
"""simple docstring"""
def find_minimum_change(denominations , value ) -> list[int]:
    total_value = int(value )
    # Initialize Result
    answer = []
    # Traverse through all denomination
    for denomination in reversed(denominations ):
        # Find denominations
        while int(total_value ) >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination ) # Append the "answers" array
    return answer
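# Hedged caveat for the greedy strategy above: it is optimal for canonical
# coin systems such as the Indian denominations used below, but not in
# general. For example, with denominations [1, 3, 4] and value 6, greedy
# returns [4, 1, 1] (three coins) while the optimal answer is [3, 3]:
#
#     find_minimum_change([1, 3, 4], "6") == [4, 1, 1]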
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = """0"""
    if (
        input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
        == "y"
    ):
        n = int(input("""Enter the number of denominations you want to add: """).strip())
        for i in range(0, n):
            denominations.append(int(input(F"""Denomination {i}: """).strip()))
        value = input("""Enter the change you want to make in Indian Currency: """).strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
        value = input("""Enter the change you want to make: """).strip()
    if int(value) == 0 or int(value) < 0:
        print("""The total value cannot be zero or negative.""")
    else:
        print(F"""Following is minimal change for {value}: """)
        answer = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
        print(answer[i], end=""" """)
| 237 | 0 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx ):
    """Return (HF name, original name) pairs for stage `idx`'s patch embedding."""
    embed = []
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
f'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
f'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
f'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
f'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def attention(idx , cnt ):
    """Return weight-name mappings for block `cnt` of stage `idx`'s attention."""
    attention_weights = []
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
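# Note: each helper above returns (huggingface_name, original_name) tuples; the
# conversion loop below walks these lists to copy tensors from the original
# checkpoint's state dict into the Hugging Face model.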
def cls_token(idx) ->List[Any]:
    """Build the rename entry for the cls token of stage `idx`."""
    token = []
    token.append((f'''cvt.encoder.stages.{idx}.cls_token''', "stage2.cls_token") )
    return token
def final() ->Union[str, Any]:
    """Build the rename entries for the final layernorm and classification head."""
    head = []
    head.append(("layernorm.weight", "norm.weight") )
    head.append(("layernorm.bias", "norm.bias") )
    head.append(("classifier.weight", "head.weight") )
    head.append(("classifier.bias", "head.bias") )
    return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder) ->Optional[int]:
    """Convert a Microsoft CvT checkpoint to the Hugging Face format."""
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1_000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset") ), "r") )
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id )
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1 )[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1 )[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1_024]
    model = CvtForImageClassification(config )
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu") )
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth ) ):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx )
        list_of_state_dict = list_of_state_dict + embeddings(idx )
        for cnt in range(config.depth[idx] ):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt )
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg )
    for i in range(len(list_of_state_dict ) ):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights )
    model.save_pretrained(pytorch_dump_folder )
    image_processor.save_pretrained(pytorch_dump_folder )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=384,
type=int,
help="""Input Image Size""",
)
    parser.add_argument(
        """--cvt_file_name""",
        default=r"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
        type=str,
        help="""Path to the original CvT checkpoint (.pth) file.""",
    )
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
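# Example invocation (a sketch; the script file name here is an assumption):
#   python convert_cvt_checkpoint.py --cvt_model cvt-w24 --image_size 384 \
#       --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-w24-384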
| 85 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : List[str] = logging.get_logger(__name__)
a : Union[str, Any] = {
"""google/pix2struct-textcaps-base""": (
"""https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"""
),
}
class Pix2StructTextConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__( self , vocab_size=50244 , hidden_size=768 , d_kv=64 , d_ff=2048 , num_layers=12 , num_heads=12 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1e-6 , initializer_factor=1.0 , dense_act_fn="gelu_new" , decoder_start_token_id=0 , use_cache=False , pad_token_id=0 , eos_token_id=1 , tie_word_embeddings=False , is_decoder=True , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , tie_word_embeddings=tie_word_embeddings , is_decoder=is_decoder , **kwargs , )
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type" ) == "pix2struct":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class Pix2StructVisionConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = "pix2struct_vision_model"
    def __init__( self , hidden_size=768 , patch_embed_hidden_size=768 , d_ff=2048 , d_kv=64 , num_hidden_layers=12 , num_attention_heads=12 , dense_act_fn="gelu_new" , layer_norm_eps=1e-6 , dropout_rate=0.0 , attention_dropout=0.0 , initializer_range=1e-10 , initializer_factor=1.0 , seq_len=4096 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type" ) == "pix2struct":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class Pix2StructConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = "pix2struct"
    is_composition = True
    def __init__( self , text_config=None , vision_config=None , initializer_factor=1.0 , initializer_range=0.02 , is_vqa=False , tie_word_embeddings=False , is_encoder_decoder=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(tie_word_embeddings=tie_word_embeddings , is_encoder_decoder=is_encoder_decoder , **kwargs )
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." )
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." )
        self.text_config = Pix2StructTextConfig(**text_config )
        self.vision_config = Pix2StructVisionConfig(**vision_config )
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa
    @classmethod
    def from_text_vision_configs( cls , text_config , vision_config , **kwargs ):
        '''simple docstring'''
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
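# Usage sketch (assumes the restored class names above match the library API):
# text_cfg = Pix2StructTextConfig(num_layers=4)
# vision_cfg = Pix2StructVisionConfig(num_hidden_layers=4)
# config = Pix2StructConfig.from_text_vision_configs(text_cfg, vision_cfg)
# print(config.to_dict()["model_type"])  # "pix2struct"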
| 85 | 1 |
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {'UserAgent': UserAgent().random}
def extract_user_profile( script ):
    data = script.contents[0]
    info = json.loads(data[data.find("{\"config\"" ) : -1] )
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    '''simple docstring'''
    def __init__( self , username ):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()
    def get_json( self ):
        html = requests.get(self.url , headers=headers ).text
        scripts = BeautifulSoup(html , "html.parser" ).find_all("script" )
        try:
            return extract_user_profile(scripts[4] )
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3] )
    def __repr__( self ):
        return f"{self.__class__.__name__}('{self.username}')"
    def __str__( self ):
        return f"{self.fullname} ({self.username}) is {self.biography}"
    @property
    def username( self ):
        return self.user_data["username"]
    @property
    def fullname( self ):
        return self.user_data["full_name"]
    @property
    def biography( self ):
        return self.user_data["biography"]
    @property
    def email( self ):
        return self.user_data["business_email"]
    @property
    def website( self ):
        return self.user_data["external_url"]
    @property
    def number_of_followers( self ):
        return self.user_data["edge_followed_by"]["count"]
    @property
    def number_of_followings( self ):
        return self.user_data["edge_follow"]["count"]
    @property
    def number_of_posts( self ):
        return self.user_data["edge_owner_to_timeline_media"]["count"]
    @property
    def profile_picture_url( self ):
        return self.user_data["profile_pic_url_hd"]
    @property
    def is_verified( self ):
        return self.user_data["is_verified"]
    @property
    def is_private( self ):
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github" ):
    import os
    if os.environ.get("CI" ):
        return # test failing on GitHub Actions
    instagram_user = InstagramUser(username )
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data , dict )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
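# Note: the scraper above depends on Instagram's server-rendered script blob
# (found via the "{\"config\"" marker); if the page layout changes, parsing
# fails with JSONDecodeError/KeyError, which get_json already guards against.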
if __name__ == "__main__":
import doctest
doctest.testmod()
__A : Dict = InstagramUser('github')
print(instagram_user)
print(f'{instagram_user.number_of_posts = }')
print(f'{instagram_user.number_of_followers = }')
print(f'{instagram_user.number_of_followings = }')
print(f'{instagram_user.email = }')
print(f'{instagram_user.website = }')
print(f'{instagram_user.profile_picture_url = }')
print(f'{instagram_user.is_verified = }')
print(f'{instagram_user.is_private = }') | 16 |
'''simple docstring'''
from __future__ import annotations
def depth_first_search( possible_board: list[int] , diagonal_right_collisions: list[int] , diagonal_left_collisions: list[int] , boards: list[list[str]] , n: int , ):
    '''simple docstring'''
    # Get next row in the current board (possible_board) to fill it with a queen
    row: int = len(possible_board )
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
    for col in range(n ):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , boards , n , )
def n_queens_solution( n: int ):
    '''simple docstring'''
    boards: list[list[str]] = []
    depth_first_search([] , [] , [] , boards , n )
    # Print all the boards
    for board in boards:
        for column in board:
            print(column )
        print("" )
    print(len(boards ) , """solutions were found.""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
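# Worked check: n_queens_solution(4) prints the two distinct 4-queens boards
# and then "2 solutions were found."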
| 135 | 0 |
"""simple docstring"""
from __future__ import annotations
def fractional_knapsack( value: list[int] , weight: list[int] , capacity: int ):
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    # Greedy: take items in decreasing value/weight order
    index.sort(key=lambda i: ratio[i] , reverse=True )
    max_value: float = 0
    fractions: list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
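# Worked example (classic instance): values [60, 100, 120], weights [10, 20, 30],
# capacity 50 -> max value 240.0 with fractions [1, 1, 2/3].
# print(fractional_knapsack([60, 100, 120], [10, 20, 30], 50))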
| 112 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string( string1: str , string2: str ):
    list1 = list(string1 )
    list2 = list(string2 )
    count = 0
    for i in range(len(list1 ) ):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1 )
def check( binary: list[str] ):
    pi = []
    while True:
        check1 = ["$"] * len(binary )
        temp = []
        for i in range(len(binary ) ):
            for j in range(i + 1 , len(binary ) ):
                k = compare_string(binary[i] , binary[j] )
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X" )
        for i in range(len(binary ) ):
            if check1[i] == "$":
                pi.append(binary[i] )
        if len(temp ) == 0:
            return pi
        binary = list(set(temp ) )
def decimal_to_binary( no_of_variable: int , minterms: Sequence[float] ):
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable ):
            string = str(minterm % 2 ) + string
            minterm //= 2
        temp.append(string )
    return temp
def is_for_table( string1: str , string2: str , count: int ):
    list1 = list(string1 )
    list2 = list(string2 )
    count_n = 0
    for i in range(len(list1 ) ):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection( chart: list[list[int]] , prime_implicants: list[str] ):
    temp = []
    select = [0] * len(chart )
    for i in range(len(chart[0] ) ):
        count = 0
        rem = -1
        for j in range(len(chart ) ):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select ) ):
        if select[i] == 1:
            for j in range(len(chart[0] ) ):
                if chart[i][j] == 1:
                    for k in range(len(chart ) ):
                        chart[k][j] = 0
            temp.append(prime_implicants[i] )
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart ) ):
            count_n = chart[i].count(1 )
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem] )
        for i in range(len(chart[0] ) ):
            if chart[rem][i] == 1:
                for j in range(len(chart ) ):
                    chart[j][i] = 0
def prime_implicant_chart( prime_implicants: list[str] , binary: list[str] ):
    chart = [[0 for x in range(len(binary ) )] for x in range(len(prime_implicants ) )]
    for i in range(len(prime_implicants ) ):
        count = prime_implicants[i].count("_" )
        for j in range(len(binary ) ):
            if is_for_table(prime_implicants[i] , binary[j] , count ):
                chart[i][j] = 1
    return chart
def main():
    no_of_variable = int(input("Enter the no. of variables\n" ) )
    minterms = [
        float(x )
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n" ).split()
    ]
    binary = decimal_to_binary(no_of_variable , minterms )
    prime_implicants = check(binary )
    print("Prime Implicants are:" )
    print(prime_implicants )
    chart = prime_implicant_chart(prime_implicants , binary )
    essential_prime_implicants = selection(chart , prime_implicants )
    print("Essential Prime Implicants are:" )
    print(essential_prime_implicants )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
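# Programmatic usage sketch (bypasses the interactive prompts in main()):
# binary = decimal_to_binary(3, [1.5])  # -> ['0.00.01.5'] (float minterms kept as-is)
# print(check(binary))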
| 112 | 1 |
import functools
from typing import Any
def word_break( string: str , words: list[str] ) -> bool:
    '''simple docstring'''
    # Validation
    if not isinstance(string , str ) or len(string ) == 0:
        raise ValueError("""the string should be not empty string""" )
    if not isinstance(words , list ) or not all(
        isinstance(item , str ) and len(item ) > 0 for item in words ):
        raise ValueError("""the words should be a list of non-empty strings""" )
    # Build trie
    trie: dict = {}
    word_keeper_key = """WORD_KEEPER"""
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True
    len_string = len(string )
    # Dynamic programming method
    @functools.cache
    def is_breakable( index: int ) -> bool:
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index , len_string ):
            trie_node = trie_node.get(string[i] , None )
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key , False ) and is_breakable(i + 1 ):
                return True
        return False
    return is_breakable(0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
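# Quick check: word_break("applepenapple", ["apple", "pen"]) -> True,
# while word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]) -> False.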
| 513 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key ,default=False ):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"""If set, {key} must be yes or no.""" )
    return _value
_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
def skip(test_case ):
    return unittest.skip("""Test was skipped""" )(test_case )
def slow(test_case ):
    return unittest.skipUnless(_run_slow_tests ,"""test is slow""" )(test_case )
def require_cpu(test_case ):
    return unittest.skipUnless(not torch.cuda.is_available() ,"""test requires only a CPU""" )(test_case )
def require_cuda(test_case ):
    return unittest.skipUnless(torch.cuda.is_available() ,"""test requires a GPU""" )(test_case )
def require_xpu(test_case ):
    return unittest.skipUnless(is_xpu_available() ,"""test requires a XPU""" )(test_case )
def require_mps(test_case ):
    return unittest.skipUnless(is_mps_available() ,"""test requires a `mps` backend support in `torch`""" )(test_case )
def require_huggingface_suite(test_case ):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available() ,"""test requires the Hugging Face suite""" )(test_case )
def require_bnb(test_case ):
    return unittest.skipUnless(is_bnb_available() ,"""test requires the bitsandbytes library""" )(test_case )
def require_tpu(test_case ):
    return unittest.skipUnless(is_tpu_available() ,"""test requires TPU""" )(test_case )
def require_single_gpu(test_case ):
    return unittest.skipUnless(torch.cuda.device_count() == 1 ,"""test requires a GPU""" )(test_case )
def require_single_xpu(test_case ):
    return unittest.skipUnless(torch.xpu.device_count() == 1 ,"""test requires a XPU""" )(test_case )
def require_multi_gpu(test_case ):
    return unittest.skipUnless(torch.cuda.device_count() > 1 ,"""test requires multiple GPUs""" )(test_case )
def require_multi_xpu(test_case ):
    return unittest.skipUnless(torch.xpu.device_count() > 1 ,"""test requires multiple XPUs""" )(test_case )
def require_safetensors(test_case ):
    return unittest.skipUnless(is_safetensors_available() ,"""test requires safetensors""" )(test_case )
def require_deepspeed(test_case ):
    return unittest.skipUnless(is_deepspeed_available() ,"""test requires DeepSpeed""" )(test_case )
def require_fsdp(test_case ):
    return unittest.skipUnless(is_torch_version(""">=""" ,"""1.12.0""" ) ,"""test requires torch version >= 1.12.0""" )(test_case )
def require_torch_min_version(test_case=None ,version=None ):
    if test_case is None:
        return partial(require_torch_min_version ,version=version )
    return unittest.skipUnless(is_torch_version(""">=""" ,version ) ,f"""test requires torch version >= {version}""" )(test_case )
def require_tensorboard(test_case ):
    return unittest.skipUnless(is_tensorboard_available() ,"""test requires Tensorboard""" )(test_case )
def require_wandb(test_case ):
    return unittest.skipUnless(is_wandb_available() ,"""test requires wandb""" )(test_case )
def require_comet_ml(test_case ):
    return unittest.skipUnless(is_comet_ml_available() ,"""test requires comet_ml""" )(test_case )
lowerCamelCase : Union[str, Any] = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def require_trackers(test_case ):
    return unittest.skipUnless(
        _atleast_one_tracker_available ,"""test requires at least one tracker to be available and for `comet_ml` to not be installed""" ,)(test_case )
class TempDirTestCase(unittest.TestCase ):
    """simple docstring"""
    clear_on_setup = True
    @classmethod
    def setUpClass( cls ):
        cls.tmpdir = tempfile.mkdtemp()
    @classmethod
    def tearDownClass( cls ):
        if os.path.exists(cls.tmpdir ):
            shutil.rmtree(cls.tmpdir )
    def setUp( self ):
        if self.clear_on_setup:
            for path in Path(self.tmpdir ).glob("""**/*""" ):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path )
class AccelerateTestCase(unittest.TestCase ):
    """simple docstring"""
    def tearDown( self ):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase(unittest.TestCase ):
    """simple docstring"""
    def add_mocks( self , mocks ):
        self.mocks = mocks if isinstance(mocks , (tuple, list) ) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop )
def are_the_same_tensors(tensor ):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device )
    tensors = gather(tensor ).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] ,tensor ):
            return False
    return True
class _RunOutput:
    """simple docstring"""
    def __init__( self , returncode , stdout , stderr ):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream ,callback ):
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess(cmd ,env=None ,stdin=None ,timeout=None ,quiet=False ,echo=False ) -> _RunOutput:
    if echo:
        print("""\nRunning: """ ,""" """.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] ,*cmd[1:] ,stdin=stdin ,stdout=asyncio.subprocess.PIPE ,stderr=asyncio.subprocess.PIPE ,env=env ,)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line ,sink ,pipe ,label="" ):
        line = line.decode("""utf-8""" ).rstrip()
        sink.append(line )
        if not quiet:
            print(label ,line ,file=pipe )
# XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout ,lambda l : tee(l ,out ,sys.stdout ,label="""stdout:""" ) ) ),
            asyncio.create_task(_read_stream(p.stderr ,lambda l : tee(l ,err ,sys.stderr ,label="""stderr:""" ) ) ),
        ] ,timeout=timeout ,)
    return _RunOutput(await p.wait() ,out ,err )
def execute_subprocess_async(cmd ,env=None ,stdin=None ,timeout=180 ,quiet=False ,echo=True ) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd ,env=env ,stdin=stdin ,timeout=timeout ,quiet=quiet ,echo=echo ) )
    cmd_str = """ """.join(cmd )
    if result.returncode > 0:
        stderr = """\n""".join(result.stderr )
        raise RuntimeError(
            f"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            f"""The combined stderr from workers follows:\n{stderr}""" )
    return result
class SubprocessCallException(Exception ):
    """simple docstring"""
    pass
def run_command(command ,return_stdout=False ):
    try:
        output = subprocess.check_output(command ,stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output ,"""decode""" ):
                output = output.decode("""utf-8""" )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"""Command `{" ".join(command )}` failed with the following error:\n\n{e.output.decode()}""" ) from e
| 587 | 0 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , mask_ratio=0.6 , scope=None , )-> Dict:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1) ) )
    def prepare_config_and_inputs( self )-> List[Any]:
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self )-> Tuple:
        '''simple docstring'''
        return ViTMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
    def create_and_check_model( self , config , pixel_values , labels )-> str:
        '''simple docstring'''
        model = ViTMAEModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_pretraining( self , config , pixel_values , labels )-> Tuple:
        '''simple docstring'''
        model = ViTMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
    def prepare_config_and_inputs_for_common( self )-> List[Any]:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase):
lowerCAmelCase_ = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
lowerCAmelCase_ = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def UpperCAmelCase_ ( self )-> Tuple:
'''simple docstring'''
UpperCamelCase = ViTMAEModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def UpperCAmelCase_ ( self )-> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(_lowerCAmelCase )
UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowerCAmelCase )
def UpperCAmelCase_ ( self , A_ , A_ , A_ )-> List[Any]:
'''simple docstring'''
np.random.seed(2 )
UpperCamelCase = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCamelCase = torch.from_numpy(_lowerCAmelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCamelCase = pt_noise
super().check_pt_tf_models(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCamelCase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
UpperCamelCase = outputs[0].cpu().numpy()
UpperCamelCase = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCAmelCase )
UpperCamelCase = model_class.from_pretrained(_lowerCAmelCase )
model.to(_lowerCAmelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCamelCase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
# Make sure we don't have nans
UpperCamelCase = after_outputs[0].cpu().numpy()
UpperCamelCase = 0
UpperCamelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase , 1e-5 )
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
pass
@slow
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = ViTMAEModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase):
@cached_property
def UpperCAmelCase_ ( self )-> str:
'''simple docstring'''
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
np.random.seed(2 )
UpperCamelCase = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' ).to(_lowerCAmelCase )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=_lowerCAmelCase , return_tensors='pt' ).to(_lowerCAmelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCamelCase = ViTMAEConfig()
UpperCamelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCamelCase = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**_lowerCAmelCase , noise=torch.from_numpy(_lowerCAmelCase ).to(device=_lowerCAmelCase ) )
# verify the logits
UpperCamelCase = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
UpperCamelCase = torch.tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(_lowerCAmelCase ) , atol=1e-4 ) )
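# Worked check of the masked sequence length used by the tester above: with the
# defaults (image_size=30, patch_size=2, mask_ratio=0.6), num_patches = (30 // 2) ** 2
# = 225 and seq_length = ceil(0.4 * 226) = 91 visible tokens.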
| 709 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt', )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config['num_epochs'] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps)
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            'Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`')
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
            metric.add_batch(
                predictions=predictions, references=references, )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'''epoch {epoch}:''', eval_metric)
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.', )
    # New Code #
    parser.add_argument(
        '--gradient_accumulation_steps', type=int, default=1, help='The number of minibatches to be ran before gradients are accumulated.', )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
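# Launch sketch (the script file name is an assumption; any accelerate config works):
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 2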
| 432 | 0 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
lowerCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase_ : Dict = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase_ : Any = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase_ : List[Any] = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase_ : Dict = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase_ : List[str] = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
lowerCAmelCase_ : int = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
lowerCAmelCase_ : Any = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
lowerCAmelCase_ : Optional[int] = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
lowerCAmelCase_ : Dict = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
lowerCAmelCase_ : Optional[Any] = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class DPRContextEncoderTokenizer(BertTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
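# Usage sketch (network access assumed; the checkpoint name comes from the maps above):
# tokenizer = DPRContextEncoderTokenizer.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
# tokenizer("Paris is the capital of France.")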
lowerCAmelCase_ : Any = collections.namedtuple(
'''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
lowerCAmelCase_ : Any = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
CUSTOM_DPR_READER_DOCSTRING = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class CustomDPRReaderTokenizerMixin:
    def __call__(self , questions , titles = None , texts = None , padding = False , truncation = False , max_length = None , return_tensors = None , return_attention_mask = None , **kwargs , ):
        if titles is None and texts is None:
            return super().__call__(
                questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        titles = titles if not isinstance(titles , str ) else [titles]
        texts = texts if not isinstance(texts , str ) else [texts]
        n_passages = len(titles )
        questions = questions if not isinstance(questions , str ) else [questions] * n_passages
        if len(titles ) != len(texts ):
            raise ValueError(
                F"There should be as many titles than texts but got {len(titles )} titles and {len(texts )} texts." )
        encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False )["""input_ids"""]
        encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False )["""input_ids"""]
        encoded_inputs = {
            """input_ids""": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts )
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors )
    def decode_best_spans(self, reader_input, reader_output, num_spans=16, max_answer_length=64,
                          num_spans_per_passage=4):
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(self, start_logits, end_logits, max_answer_length, top_spans):
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
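# A minimal, self-contained sketch (not part of the original file) of the span-selection
# rule implemented by `_get_best_spans` above: score every (start, end) window, sort by
# score, and keep the best non-overlapping intervals. `toy_best_spans` is a hypothetical
# standalone replica over plain Python lists.
def toy_best_spans(start_logits, end_logits, max_answer_length, top_spans):
    scores = []
    for start_index, start_score in enumerate(start_logits):
        for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
            scores.append(((start_index, start_index + answer_length), start_score + end_score))
    # highest-scoring candidate spans first
    scores = sorted(scores, key=lambda x: x[1], reverse=True)
    chosen = []
    for (start_index, end_index), _score in scores:
        # skip any span that contains, or is contained in, an already chosen span
        if any(s <= start_index <= end_index <= e or start_index <= s <= e <= end_index for s, e in chosen):
            continue
        chosen.append((start_index, end_index))
        if len(chosen) == top_spans:
            break
    return chosen


# toy_best_spans([0.1, 2.0, 0.3], [0.2, 1.5, 0.1], max_answer_length=2, top_spans=2)
# -> [(1, 1), (2, 2)]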
@add_end_docstrings(__a )
class __lowerCAmelCase ( __a , __a ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
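# Hedged usage sketch (assumes the public `transformers` DPR reader API; not part of the
# original file). The reader tokenizer packs question + title + text per passage, and
# `decode_best_spans` then ranks passages by relevance and extracts answer spans:
#
# from transformers import DPRReader, DPRReaderTokenizer
# tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
# model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
# encoded_inputs = tokenizer(
#     questions="What is love?",
#     titles="Haddaway",
#     texts="'What Is Love' is a song recorded by the artist Haddaway",
#     return_tensors="pt",
# )
# outputs = model(**encoded_inputs)
# predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs, num_spans=1)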
| 414 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3,
                 is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2,
                 num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 type_sequence_label_size=10, initializer_range=0.02, num_labels=3,
                 scope=None, encoder_stride=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
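# Hedged worked example (not part of the original tests): with the defaults above,
# image_size=30 and patch_size=2 give (30 // 2) ** 2 = 225 patches, so the expected
# sequence length is 225 + 2 = 227 once the [CLS] and distillation tokens are added.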
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFDeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 414 | 1 |
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid, source, destination, allow_diagonal):
    """Return the shortest distance and path between two cells of a binary grid (1 = walkable)."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
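# Hedged usage sketch (not part of the original module); cells equal to 1 are walkable:
#
# grid = np.array([[1, 1, 1],
#                  [0, 1, 0],
#                  [0, 1, 1]])
# dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)
# -> (4.0, [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)])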
if __name__ == "__main__":
import doctest
doctest.testmod()
| 708 |
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the
    `TYPE_CHECKING` objects defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """
    Analyze the differences between `_import_structure` objects and `TYPE_CHECKING` objects found in an init.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check all inits in the repo define the same objects in both halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    """Check all submodules are registered in the main init of Transformers."""
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 76 | 0 |
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
A = False, False, False
@dataclass
class Audio:
    """Audio feature to extract audio data from an audio file."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
| 125 |
import numpy as np
SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]
class PolybiusCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")

        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
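# Hedged usage example (not part of the original module): "j" is folded into "i" before
# encoding, and encode/decode round-trip through the flattened (row, column) stream:
#
# cipher = PolybiusCipher()
# cipher.encode("test")   # -> "qtuo"
# cipher.decode("qtuo")   # -> "test"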
| 493 | 0 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True,
                 use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32,
                 rotary_dim=4, num_hidden_layers=4, num_attention_heads=4, intermediate_size=37,
                 hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )
    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 607 |
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
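# Hedged numeric checks (not part of the original module): each softmax row sums to 1,
# and subtracting the row max keeps np.exp from overflowing on large logits.
#
# sigmoid(np.array([0.0]))               # -> array([0.5])
# softmax(np.array([[1000.0, 1000.0]]))  # -> array([[0.5, 0.5]])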
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `"default"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `"sigmoid"`: Applies the sigmoid function on the output.
- `"softmax"`: Applies the softmax function on the output.
- `"none"`: Does not apply any function on the output.
    """,
)
class TextClassificationPipeline(Pipeline):
    """Text classification pipeline using any `ModelForSequenceClassification`."""

    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
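# Hedged usage sketch (assumes the public `transformers` pipeline API; not part of the
# original module):
#
# from transformers import pipeline
# classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
# classifier("I love this!")              # -> [{"label": "POSITIVE", "score": 0.99...}]
# classifier("I love this!", top_k=None)  # -> one dict per label, sorted by descending score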
| 607 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
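# Hedged note (not part of the original file): `_LazyModule` defers the imports declared in
# `_import_structure` until an attribute is first accessed, so importing the package stays
# cheap even when optional backends (here: `tokenizers`) are not installed, e.g.:
#
# from transformers import HerbertTokenizer  # resolved lazily through _import_structure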
| 289 |
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
options = ort.SessionOptions()
options.enable_mem_pattern = False
return options
def _snake_case ( self )->int:
'''simple docstring'''
A_ : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
A_ : Union[str, Any] = init_image.resize((128, 128) )
# using the PNDM scheduler by default
A_ : int = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = '''A fantasy landscape, trending on artstation'''
A_ : Optional[int] = torch.manual_seed(0 )
A_ : Optional[Any] = pipe(
prompt=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=10 , generator=_SCREAMING_SNAKE_CASE , output_type='''np''' , )
A_ : Optional[Any] = output.images
A_ : Any = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
A_ : str = np.array([0.4_8_8_3, 0.4_9_4_7, 0.4_9_8_0, 0.4_9_7_5, 0.4_9_8_2, 0.4_9_8_0, 0.5_0_0_0, 0.5_0_0_6, 0.4_9_7_2] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _snake_case ( self )->List[str]:
'''simple docstring'''
A_ : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
A_ : Dict = init_image.resize((128, 128) )
A_ : Dict = LMSDiscreteScheduler.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , subfolder='''scheduler''' )
A_ : List[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , scheduler=_SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ : Dict = '''A fantasy landscape, trending on artstation'''
A_ : List[str] = torch.manual_seed(0 )
A_ : List[Any] = pipe(
prompt=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=20 , generator=_SCREAMING_SNAKE_CASE , output_type='''np''' , )
A_ : Any = output.images
A_ : List[Any] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
A_ : Optional[int] = np.array(
[0.5_0_1_7_3_7_5_3, 0.5_0_2_2_3_3_5_6, 0.5_0_2_0_3_9, 0.5_0_2_3_3_0_3_6, 0.5_0_2_3_7_2_5, 0.5_0_2_2_6_0_1, 0.5_0_1_8_7_5_8, 0.5_0_2_3_4_0_8_5, 0.5_0_2_4_1_5_6_6] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
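# Standalone usage sketch for the pipeline exercised above (not part of the test
# suite; the checkpoint id and CPU provider are taken from the tests, the prompt
# and step count are arbitrary assumptions):
#
#   from diffusers import OnnxStableDiffusionUpscalePipeline
#   upscaler = OnnxStableDiffusionUpscalePipeline.from_pretrained(
#       "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider"
#   )
#   result = upscaler(prompt="a fantasy landscape", image=low_res_image, num_inference_steps=20)
#   upscaled = result.images[0]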
| 590 | 0 |
"""simple docstring"""
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def A_ (__a=32 , __a=10 , __a=100 , __a=1026 , __a=True , __a="data/tokenized_stories_train_wikitext103.jbl" , __a="igf_context_pairs.jbl" , ):
'''simple docstring'''
set_seed(3 )
# generate train_data and objective_set
A_ = generate_datasets(
_lowerCamelCase , _lowerCamelCase , number=_lowerCamelCase , min_len=1026 , trim=_lowerCamelCase )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
A_ = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# load pretrained model
A_ = load_gpta("gpt2" ).to(_lowerCamelCase )
print("computing perplexity on objective set" )
A_ = compute_perplexity(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ).item()
print("perplexity on objective set:" , _lowerCamelCase )
# collect igf pairs and save to file demo.jbl
collect_objective_set(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def A_ (__a , __a=15 , __a=128 , __a=100 , __a="igf_model.pt" , ):
'''simple docstring'''
set_seed(42 )
# Load pre-trained model
A_ = GPTaLMHeadModel.from_pretrained("gpt2" )
# Initialize secondary learner to use embedding weights of model
A_ = SecondaryLearner(_lowerCamelCase )
# Train secondary learner
A_ = train_secondary_learner(
_lowerCamelCase , _lowerCamelCase , max_epochs=_lowerCamelCase , batch_size=_lowerCamelCase , eval_freq=100 , igf_model_path=_lowerCamelCase , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
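# Explanatory note (no new behaviour): the secondary learner returned here is used by
# finetune() below to predict IG(X) for each candidate context; contexts whose
# predicted information gain falls below a decaying threshold are filtered out, so the
# language model only backpropagates on the most informative examples.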
def A_ (__a , __a , __a , __a=32 , __a=1000 , __a=16 , __a=1.0 , __a=recopy_gpta , __a=None , __a=10 , __a="gpt2_finetuned.pt" , ):
'''simple docstring'''
A_ = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
A_ = RandomSampler(_lowerCamelCase )
A_ = DataLoader(_lowerCamelCase , sampler=_lowerCamelCase )
A_ = max_steps // (len(_lowerCamelCase )) + 1
A_ = 0
A_ = torch.zeros((1, context_len) , dtype=torch.long , device=_lowerCamelCase )
A_ = recopy_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
model.train()
if secondary_learner is not None:
secondary_learner.to(_lowerCamelCase )
secondary_learner.eval()
A_ = []
A_ = 0
A_ = []
A_ = []
# Compute the performance of the transformer model at the beginning
A_ = compute_perplexity(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
test_perps.append(_lowerCamelCase )
print("Test perplexity, step" , _lowerCamelCase , ":" , _lowerCamelCase )
for epoch in range(int(_lowerCamelCase ) ):
for step, example in enumerate(_lowerCamelCase ):
torch.cuda.empty_cache()
A_ = random.randint(0 , example.size(2 ) - context_len - 1 )
A_ = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
A_ = model(_lowerCamelCase , labels=_lowerCamelCase )
A_ = True
if secondary_learner is not None:
A_ = secondary_learner.forward(
torch.tensor(_lowerCamelCase , dtype=torch.long , device=_lowerCamelCase ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(_lowerCamelCase ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
A_ = -1
if predicted_q < threshold:
A_ = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
A_ = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
A_ = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
A_ = compute_perplexity(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
test_perps.append(_lowerCamelCase )
print("Test perplexity, step" , _lowerCamelCase , ":" , _lowerCamelCase )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , _lowerCamelCase )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def A_ ():
'''simple docstring'''
A_ = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task" )
# Required parameters
parser.add_argument(
"--data_dir" , default=_lowerCamelCase , type=_lowerCamelCase , required=_lowerCamelCase , help="The input data dir. Should contain data files for WikiText." , )
parser.add_argument(
"--model_name_or_path" , default=_lowerCamelCase , type=_lowerCamelCase , required=_lowerCamelCase , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--data_file" , type=_lowerCamelCase , default=_lowerCamelCase , help=(
"A jbl file containing tokenized data which can be split as objective dataset, "
"train_dataset and test_dataset."
) , )
parser.add_argument(
"--igf_data_file" , type=_lowerCamelCase , default=_lowerCamelCase , help="A jbl file containing the context and information gain pairs to train secondary learner." , )
parser.add_argument(
"--output_dir" , default=_lowerCamelCase , type=_lowerCamelCase , required=_lowerCamelCase , help="The output directory where the final fine-tuned model is stored." , )
parser.add_argument(
"--tokenizer_name" , default=_lowerCamelCase , type=_lowerCamelCase , help="Pretrained tokenizer name or path if not the same as model_name" , )
parser.add_argument("--seed" , type=_lowerCamelCase , default=_lowerCamelCase , help="A seed for reproducible training." )
parser.add_argument(
"--context_len" , default=32 , type=_lowerCamelCase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--size_objective_set" , default=100 , type=_lowerCamelCase , help="number of articles that are long enough to be used as our objective set" , )
parser.add_argument(
"--eval_freq" , default=100 , type=_lowerCamelCase , help="secondary model evaluation is triggered at eval_freq" )
parser.add_argument("--max_steps" , default=1000 , type=_lowerCamelCase , help="To calculate training epochs" )
parser.add_argument(
"--secondary_learner_batch_size" , default=128 , type=_lowerCamelCase , help="batch size of training data for secondary learner" , )
parser.add_argument(
"--batch_size" , default=16 , type=_lowerCamelCase , help="batch size of training data of language model(gpt2) " )
parser.add_argument(
"--eval_interval" , default=10 , type=_lowerCamelCase , help=(
"decay the selectivity of our secondary learner filter from"
"1 standard deviation above average to 1 below average after 10 batches"
) , )
parser.add_argument(
"--number" , default=100 , type=_lowerCamelCase , help="The number of examples split to be used as objective_set/test_data" )
parser.add_argument(
"--min_len" , default=1026 , type=_lowerCamelCase , help="The minimum length of the article to be used as objective set" )
parser.add_argument(
"--secondary_learner_max_epochs" , default=15 , type=_lowerCamelCase , help="number of epochs to train secondary learner" )
parser.add_argument("--trim" , default=_lowerCamelCase , type=_lowerCamelCase , help="truncate the example if it exceeds context length" )
parser.add_argument(
"--threshold" , default=1.0 , type=_lowerCamelCase , help=(
"The threshold value used by secondary learner to filter the train_data and allow only"
" informative data as input to the model"
) , )
parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=_lowerCamelCase , help="finetuned_model_name" )
parser.add_argument(
"--recopy_model" , default=_lowerCamelCase , type=_lowerCamelCase , help="Reset the model to the original pretrained GPT-2 weights after each iteration" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=_lowerCamelCase , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , )
# Load train data for secondary learner
A_ = joblib.load("data/IGF_values.jbl" )
# Train secondary learner
A_ = training_secondary_learner(
_lowerCamelCase , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="igf_model.pt" , )
# load pretrained gpt2 model
A_ = GPTaLMHeadModel.from_pretrained("gpt2" )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
A_ = generate_datasets(
context_len=32 , file="data/tokenized_stories_train_wikitext103.jbl" , number=100 , min_len=1026 , trim=_lowerCamelCase )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=_lowerCamelCase , secondary_learner=_lowerCamelCase , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , )
if __name__ == "__main__":
main()
| 716 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , _snake_case : Optional[int] , _snake_case : Any=13 , _snake_case : Union[str, Any]=2 , _snake_case : Optional[int]=24 , _snake_case : Optional[Any]=16 , _snake_case : List[str]=True , _snake_case : str=True , _snake_case : List[Any]=32 , _snake_case : str=5 , _snake_case : int=4 , _snake_case : List[str]=37 , _snake_case : int="gelu" , _snake_case : str=0.1 , _snake_case : Optional[int]=0.1 , _snake_case : Optional[int]=10 , _snake_case : int=0.0_2 , _snake_case : int=None , _snake_case : Optional[Any]=2 , _snake_case : int=2 , ) -> Dict:
"""simple docstring"""
A_ = parent
A_ = batch_size
A_ = patch_size
A_ = max_length
A_ = num_mel_bins
A_ = is_training
A_ = use_labels
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = type_sequence_label_size
A_ = initializer_range
A_ = scope
A_ = frequency_stride
A_ = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
A_ = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
A_ = (self.max_length - self.patch_size) // self.time_stride + 1
A_ = frequency_out_dimension * time_out_dimension
A_ = num_patches + 2
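# Worked example with the defaults above (illustration only): num_mel_bins=16,
# max_length=24, patch_size=2, frequency_stride=2, time_stride=2 gives
#   frequency_out_dimension = (16 - 2) // 2 + 1 = 8
#   time_out_dimension      = (24 - 2) // 2 + 1 = 12
#   num_patches = 8 * 12 = 96, hence seq_length = 96 + 2 = 98.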
def lowerCamelCase__ ( self : Dict ) -> int:
"""simple docstring"""
A_ = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = self.get_config()
return config, input_values, labels
def lowerCamelCase__ ( self : List[str] ) -> str:
"""simple docstring"""
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_snake_case , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def lowerCamelCase__ ( self : int , _snake_case : Dict , _snake_case : Tuple , _snake_case : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
A_ = ASTModel(config=_snake_case )
model.to(_snake_case )
model.eval()
A_ = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : str ) -> List[str]:
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
config , input_values , labels = config_and_inputs
inputs_dict = {"input_values": input_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
snake_case = (
{"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
if is_torch_available()
else {}
)
snake_case = False
snake_case = False
snake_case = False
snake_case = False
def lowerCamelCase__ ( self : Any , _snake_case : Tuple , _snake_case : str , _snake_case : List[str] , _snake_case : int , _snake_case : int ) -> str:
"""simple docstring"""
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def lowerCamelCase__ ( self : str ) -> str:
"""simple docstring"""
A_ = ASTModelTester(self )
A_ = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37 )
def lowerCamelCase__ ( self : List[str] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="AST does not use inputs_embeds" )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
pass
def lowerCamelCase__ ( self : List[str] ) -> str:
"""simple docstring"""
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case , nn.Linear ) )
def lowerCamelCase__ ( self : Any ) -> Tuple:
"""simple docstring"""
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_snake_case )
A_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ = [*signature.parameters.keys()]
A_ = ["input_values"]
self.assertListEqual(arg_names[:1] , _snake_case )
def lowerCamelCase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
@slow
def lowerCamelCase__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = ASTModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def prepare_audio ():
'''simple docstring'''
filepath = hf_hub_download(
repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" )
audio , sampling_rate = torchaudio.load(filepath )
return audio, sampling_rate
@require_torch
@require_torchaudio
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCamelCase__ ( self : List[str] ) -> int:
"""simple docstring"""
return (
ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" )
if is_torchaudio_available()
else None
)
@slow
def lowerCamelCase__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
A_ = self.default_feature_extractor
A_ = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ).to(_snake_case )
A_ = self.default_feature_extractor
A_ , A_ = prepare_audio()
A_ = audio.squeeze().numpy()
A_ = feature_extractor(_snake_case , sampling_rate=_snake_case , return_tensors="pt" ).to(_snake_case )
# forward pass
with torch.no_grad():
A_ = model(**_snake_case )
# verify the logits
A_ = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , _snake_case )
A_ = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1e-4 ) )
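# Context for the numbers above (a note, not extra assertions): 527 is the number of
# AudioSet classes this checkpoint was fine-tuned on, hence the (1, 527) logits shape;
# the three reference logits are the expected values for this checkpoint on the
# bundled sample clip.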
| 482 | 0 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter ( formatter_cls : type ,format_type : Optional[str] ,aliases : Optional[List[str]] = None ,) -> None:
"""simple docstring"""
aliases = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
F"""Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})""" )
_FORMAT_TYPES[format_type] = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
F"""Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})""" )
_FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter ( unavailable_error : Exception ,format_type : Optional[str] ,aliases : Optional[List[str]] = None ) -> None:
"""simple docstring"""
aliases = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
_FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
_torch_error = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''')
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
_tf_error = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''')
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
_jax_error = ValueError('''JAX needs to be installed to be able to return JAX arrays.''')
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def get_format_type_from_alias ( format_type : Optional[str] ) -> Optional[str]:
"""simple docstring"""
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def get_formatter ( format_type : Optional[str] ,**format_kwargs ) -> Formatter:
"""simple docstring"""
format_type = get_format_type_from_alias(format_type )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**format_kwargs )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
F"""Return type should be None or selected in {list(fmt for fmt in _FORMAT_TYPES.keys() if fmt is not None )}, but got '{format_type}'""" )
| 14 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class XmodConfig ( PretrainedConfig ):
"""simple docstring"""
model_type = 'xmod'
def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-1_2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , pre_norm=False , adapter_reduction_factor=2 , adapter_layer_norm=False , adapter_reuse_layer_norm=True , ln_before_adapter=True , languages=("en_XX",) , default_language=None , **kwargs , ) -> Any:
"""simple docstring"""
super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.classifier_dropout = classifier_dropout
self.pre_norm = pre_norm
self.adapter_reduction_factor = adapter_reduction_factor
self.adapter_layer_norm = adapter_layer_norm
self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
self.ln_before_adapter = ln_before_adapter
self.languages = list(languages )
self.default_language = default_language
class XmodOnnxConfig ( OnnxConfig ):
"""simple docstring"""
@property
def inputs ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
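# For the default (non multiple-choice) task the property above evaluates to,
# illustratively:
#   OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#                ("attention_mask", {0: "batch", 1: "sequence"})])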
| 332 | 0 |
"""simple docstring"""
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = 'naver-clova-ix/donut-base'
class __A ( unittest.TestCase ):
def setUp ( self ) -> None:
self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME )
def test_token2json ( self ) -> Any:
expected_json = {
"""name""": """John Doe""",
"""age""": """99""",
"""city""": """Atlanta""",
"""state""": """GA""",
"""zip""": """30301""",
"""phone""": """123-4567""",
"""nicknames""": [{"""nickname""": """Johnny"""}, {"""nickname""": """JD"""}],
}
sequence = (
"""<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"""
"""<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"""
"""<s_nicknames><s_nickname>Johnny</s_nickname>"""
"""<sep/><s_nickname>JD</s_nickname></s_nicknames>"""
)
actual_json = self.processor.token2json(sequence )
self.assertDictEqual(actual_json , expected_json )
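# What token2json does here (explanatory note): the XML-like tag string above is
# parsed back into the nested dict, so <s_name>John Doe</s_name> becomes
# {"name": "John Doe"} and the repeated <s_nickname> blocks collapse into the list
# under "nicknames".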
| 720 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class __A ( unittest.TestCase ):
UpperCAmelCase__ = MODEL_FOR_CAUSAL_LM_MAPPING
UpperCAmelCase__ = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def lowerCamelCase__ ( self : Optional[int] ) -> Union[str, Any]:
__magic_name__: int = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""pt""" )
# Using `do_sample=False` to force deterministic output
__magic_name__: Dict = text_generator("""This is a test""" , do_sample=__snake_case )
self.assertEqual(
__snake_case , [
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
] , )
__magic_name__: Dict = text_generator(["""This is a test""", """This is a second test"""] )
self.assertEqual(
__snake_case , [
[
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"""
""" oscope. oscope. FiliFili@@"""
)
}
],
] , )
__magic_name__: Optional[Any] = text_generator("""This is a test""" , do_sample=__snake_case , num_return_sequences=2 , return_tensors=__snake_case )
self.assertEqual(
__snake_case , [
{"""generated_token_ids""": ANY(__snake_case )},
{"""generated_token_ids""": ANY(__snake_case )},
] , )
__magic_name__: List[str] = text_generator.model.config.eos_token_id
__magic_name__: Dict = """<pad>"""
__magic_name__: Dict = text_generator(
["""This is a test""", """This is a second test"""] , do_sample=__snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=__snake_case , )
self.assertEqual(
__snake_case , [
[
{"""generated_token_ids""": ANY(__snake_case )},
{"""generated_token_ids""": ANY(__snake_case )},
],
[
{"""generated_token_ids""": ANY(__snake_case )},
{"""generated_token_ids""": ANY(__snake_case )},
],
] , )
@require_tf
def lowerCamelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
__magic_name__: int = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""tf""" )
# Using `do_sample=False` to force deterministic output
__magic_name__: Optional[Any] = text_generator("""This is a test""" , do_sample=__snake_case )
self.assertEqual(
__snake_case , [
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
] , )
__magic_name__: Optional[int] = text_generator(["""This is a test""", """This is a second test"""] , do_sample=__snake_case )
self.assertEqual(
__snake_case , [
[
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"""
""" Cannes 閲閲Cannes Cannes Cannes 攵 please,"""
)
}
],
] , )
def lowerCamelCase__ ( self : Optional[int] , __snake_case : Union[str, Any] , __snake_case : str , __snake_case : Tuple ) -> Any:
__magic_name__: int = TextGenerationPipeline(model=__snake_case , tokenizer=__snake_case )
return text_generator, ["This is a test", "Another test"]
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
__magic_name__: Tuple = """Hello I believe in"""
__magic_name__: List[str] = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
__magic_name__: List[Any] = text_generator(__snake_case )
self.assertEqual(
__snake_case , [{"""generated_text""": """Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"""}] , )
__magic_name__: List[str] = text_generator(__snake_case , stop_sequence=""" fe""" )
self.assertEqual(__snake_case , [{"""generated_text""": """Hello I believe in fe"""}] )
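# Note on stop_sequence (an observation about the expected output above): generation
# is cut at the first occurrence of the stop string, keeping tokens up to and
# including " fe", so the repeating "fe fe fe ..." tail collapses to a single " fe".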
def lowerCamelCase__ ( self : Any , __snake_case : List[Any] , __snake_case : Union[str, Any] ) -> str:
__magic_name__: Optional[int] = text_generator.model
__magic_name__: Union[str, Any] = text_generator.tokenizer
__magic_name__: Union[str, Any] = text_generator("""This is a test""" )
self.assertEqual(__snake_case , [{"""generated_text""": ANY(__snake_case )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
__magic_name__: str = text_generator("""This is a test""" , return_full_text=__snake_case )
self.assertEqual(__snake_case , [{"""generated_text""": ANY(__snake_case )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
__magic_name__: Optional[int] = pipeline(task="""text-generation""" , model=__snake_case , tokenizer=__snake_case , return_full_text=__snake_case )
__magic_name__: Tuple = text_generator("""This is a test""" )
self.assertEqual(__snake_case , [{"""generated_text""": ANY(__snake_case )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
__magic_name__: Optional[int] = text_generator("""This is a test""" , return_full_text=__snake_case )
self.assertEqual(__snake_case , [{"""generated_text""": ANY(__snake_case )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
__magic_name__: List[str] = text_generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=__snake_case )
self.assertEqual(
__snake_case , [
[{"""generated_text""": ANY(__snake_case )}, {"""generated_text""": ANY(__snake_case )}],
[{"""generated_text""": ANY(__snake_case )}, {"""generated_text""": ANY(__snake_case )}],
] , )
if text_generator.tokenizer.pad_token is not None:
__magic_name__: Union[str, Any] = text_generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=__snake_case )
self.assertEqual(
__snake_case , [
[{"""generated_text""": ANY(__snake_case )}, {"""generated_text""": ANY(__snake_case )}],
[{"""generated_text""": ANY(__snake_case )}, {"""generated_text""": ANY(__snake_case )}],
] , )
with self.assertRaises(__snake_case ):
__magic_name__: Any = text_generator("""test""" , return_full_text=__snake_case , return_text=__snake_case )
with self.assertRaises(__snake_case ):
__magic_name__: List[str] = text_generator("""test""" , return_full_text=__snake_case , return_tensors=__snake_case )
with self.assertRaises(__snake_case ):
__magic_name__: Tuple = text_generator("""test""" , return_text=__snake_case , return_tensors=__snake_case )
# Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
__magic_name__: int = text_generator("""""" )
self.assertEqual(__snake_case , [{"""generated_text""": ANY(__snake_case )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
__magic_name__: Any = text_generator("""""" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
__magic_name__: Union[str, Any] = ["""RwkvForCausalLM""", """XGLMForCausalLM""", """GPTNeoXForCausalLM"""]
if (
tokenizer.model_max_length < 1_0_0_0_0
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("""This is a test""" * 5_0_0 , max_new_tokens=2_0 )
__magic_name__: List[str] = text_generator("""This is a test""" * 5_0_0 , handle_long_generation="""hole""" , max_new_tokens=2_0 )
# Hole strategy cannot work
with self.assertRaises(__snake_case ):
text_generator(
"""This is a test""" * 5_0_0 , handle_long_generation="""hole""" , max_new_tokens=tokenizer.model_max_length + 1_0 , )
@require_torch
@require_accelerate
@require_torch_gpu
def lowerCamelCase__ ( self : List[str] ) -> List[str]:
import torch
# Classic `model_kwargs`
__magic_name__: Optional[int] = pipeline(
model="""hf-internal-testing/tiny-random-bloom""" , model_kwargs={"""device_map""": """auto""", """torch_dtype""": torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
__magic_name__: Optional[int] = pipe("""This is a test""" )
self.assertEqual(
__snake_case , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
__magic_name__: Optional[Any] = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
__magic_name__: Optional[Any] = pipe("""This is a test""" )
self.assertEqual(
__snake_case , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
__magic_name__: int = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
__magic_name__: Any = pipe("""This is a test""" )
self.assertEqual(
__snake_case , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
@require_torch
@require_torch_gpu
def lowerCamelCase__ ( self : List[str] ) -> Any:
import torch
__magic_name__: List[Any] = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device=0 , torch_dtype=torch.floataa )
pipe("""This is a test""" )
@require_torch
@require_accelerate
@require_torch_gpu
def lowerCamelCase__ ( self : Dict ) -> Any:
import torch
__magic_name__: List[Any] = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.floataa )
pipe("""This is a test""" , do_sample=__snake_case , top_p=0.5 )
def lowerCamelCase__ ( self : List[str] ) -> Any:
__magic_name__: Optional[int] = """Hello world"""
__magic_name__: List[Any] = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
if text_generator.model.framework == "tf":
__magic_name__: str = logging.get_logger("""transformers.generation.tf_utils""" )
else:
__magic_name__: Any = logging.get_logger("""transformers.generation.utils""" )
__magic_name__: Union[str, Any] = """Both `max_new_tokens`""" # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(__snake_case ) as cl:
__magic_name__: Dict = text_generator(__snake_case , max_length=1_0 , max_new_tokens=1 )
self.assertIn(__snake_case , cl.out )
# The user only sets one -> no warning
with CaptureLogger(__snake_case ) as cl:
__magic_name__: str = text_generator(__snake_case , max_new_tokens=1 )
self.assertNotIn(__snake_case , cl.out )
with CaptureLogger(__snake_case ) as cl:
__magic_name__: Dict = text_generator(__snake_case , max_length=1_0 )
self.assertNotIn(__snake_case , cl.out )
| 213 | 0 |
import qiskit
def single_qubit_measure ( qubits : int , classical_bits : int ):
"""simple docstring"""
simulator = qiskit.Aer.get_backend('aer_simulator' )
# Create a Quantum Circuit acting on the q register
circuit = qiskit.QuantumCircuit(qubits , classical_bits )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
# Execute the circuit on the qasm simulator
job = qiskit.execute(circuit , simulator , shots=1000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(circuit )
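# Expected outcome for the circuit above (sanity note, not extra code): X on both
# qubits prepares the state |11>, so all 1000 shots should land in the '11' bucket,
# i.e. roughly {'11': 1000}.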
if __name__ == "__main__":
counts = single_qubit_measure(2, 2)
print(F'''Total count for various states are: {counts}''') | 687 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig (datasets.BuilderConfig ):
"""simple docstring"""
features : Optional[datasets.Features] = None
class Pandas (datasets.ArrowBasedBuilder ):
"""simple docstring"""
BUILDER_CONFIG_CLASS = PandasConfig
def _info ( self ):
return datasets.DatasetInfo(features=self.config.features )
def _split_generators ( self , dl_manager ):
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
data_files = dl_manager.download_and_extract(self.config.data_files )
if isinstance(data_files , (str, list, tuple) ):
files = data_files
if isinstance(files , str ):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
splits = []
for split_name, files in data_files.items():
if isinstance(files , str ):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file ) for file in files]
splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={'files': files} ) )
return splits
def _cast_table ( self , pa_table : pa.Table ):
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table , self.config.features.arrow_schema )
return pa_table
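# Usage sketch for this builder (hypothetical file name; the builder is wired in as
# the packaged "pandas" loader in `datasets`):
#   ds = datasets.load_dataset("pandas", data_files="frame.pkl")
# Each pickle file is unpickled into a DataFrame and converted to one Arrow table.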
def _generate_tables ( self , files ):
for i, file in enumerate(itertools.chain.from_iterable(files ) ):
with open(file , 'rb' ) as f:
pa_table = pa.Table.from_pandas(pd.read_pickle(f ) )
yield i, self._cast_table(pa_table ) | 687 | 1 |
"""simple docstring"""
from math import isqrt, log2
def calculate_prime_numbers ( max_number : int ):
'''simple docstring'''
is_prime = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , max_number , i ):
is_prime[j] = False
return [i for i in range(2 , max_number ) if is_prime[i]]
def solution ( base : int = 80_08_00 , degree : int = 80_08_00 ):
'''simple docstring'''
upper_bound = degree * log2(base )
max_prime = int(upper_bound )
prime_numbers = calculate_prime_numbers(max_prime )
hybrid_integers_count = 0
left = 0
right = len(prime_numbers ) - 1
while left < right:
while (
prime_numbers[right] * log2(prime_numbers[left] )
+ prime_numbers[left] * log2(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
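# Why the log comparison works (reasoning note): p**q * q**p <= base**degree is
# equivalent, after taking log2, to q*log2(p) + p*log2(q) <= degree*log2(base),
# which is exactly the upper_bound test driving the two-pointer sweep above.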
if __name__ == "__main__":
print(f'''{solution() = }''')
| 707 |
"""simple docstring"""
import os
def solution ():
'''simple docstring'''
file_path = os.path.join(os.path.dirname(__file__ ) , "num.txt" )
with open(file_path ) as file_hand:
return str(sum(int(line ) for line in file_hand ) )[:10]
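# Note: Python integers are arbitrary precision, so summing the full numbers in
# num.txt is exact, and slicing the first 10 characters of the string gives the
# answer directly.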
if __name__ == "__main__":
print(solution())
| 292 | 0 |
import numpy as np
def sigmoid ( vector : np.ndarray ) -> np.ndarray:
"""simple docstring"""
return 1 / (1 + np.exp(-vector ))
def sigmoid_linear_unit ( vector : np.ndarray ) -> np.ndarray:
"""simple docstring"""
return vector * sigmoid(vector )
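# Quick sanity values (illustration; sigmoid_linear_unit is our name for the
# x * sigmoid(x) activation above, also known as SiLU or swish): sigmoid(0) = 0.5,
# so sigmoid_linear_unit(0) = 0.0, and for large positive x the function approaches x.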
if __name__ == "__main__":
import doctest
doctest.testmod()
| 637 |
import argparse
import os
import re
import packaging.version
__a : Tuple = "examples/"
__a : Any = {
"examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
__a : Tuple = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
__a : List[str] = "README.md"
def _SCREAMING_SNAKE_CASE ( __lowercase : str , __lowercase : List[str] , __lowercase : Optional[Any] ) -> List[Any]:
"""simple docstring"""
with open(__lowercase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__A = f.read()
__A , __A = REPLACE_PATTERNS[pattern]
__A = replace.replace("""VERSION""" , __lowercase )
__A = re_pattern.sub(__lowercase , __lowercase )
with open(__lowercase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(__lowercase )
def _SCREAMING_SNAKE_CASE ( __lowercase : List[str] ) -> Dict:
"""simple docstring"""
for folder, directories, fnames in os.walk(__lowercase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(__lowercase , __lowercase ) , __lowercase , pattern="""examples""" )
def _SCREAMING_SNAKE_CASE ( __lowercase : Any , __lowercase : Optional[int]=False ) -> Dict:
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__lowercase , __lowercase , __lowercase )
if not patch:
update_version_in_examples(__lowercase )
def _SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
"""simple docstring"""
__A = """🤗 Transformers currently provides the following architectures"""
__A = """1. Want to contribute a new model?"""
with open(__lowercase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__A = f.readlines()
# Find the start of the list.
__A = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__A = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
__A = lines[index].replace(
"""https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , )
index += 1
with open(__lowercase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(__lowercase )
def _SCREAMING_SNAKE_CASE ( ) -> Dict:
"""simple docstring"""
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
__A = f.read()
__A = REPLACE_PATTERNS["""init"""][0].search(__lowercase ).groups()[0]
return packaging.version.parse(__lowercase )
def _SCREAMING_SNAKE_CASE ( __lowercase : str=False ) -> Optional[Any]:
"""simple docstring"""
__A = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
__A = default_version.base_version
elif patch:
__A = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
else:
__A = f"{default_version.major}.{default_version.minor + 1}.0"
# Now let's ask nicely if that's the right one.
__A = input(f"Which version are you releasing? [{default_version}]" )
if len(__lowercase ) == 0:
__A = default_version
print(f"Updating version to {version}." )
global_version_update(__lowercase , patch=__lowercase )
if not patch:
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
def _SCREAMING_SNAKE_CASE ( ) -> List[str]:
"""simple docstring"""
__A = get_version()
__A = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
__A = current_version.base_version
# Check with the user we got that right.
__A = input(f"Which version are we developing now? [{dev_version}]" )
if len(__lowercase ) == 0:
__A = dev_version
print(f"Updating version to {version}." )
global_version_update(__lowercase )
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__a : str = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
__a : Union[str, Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
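# Typical invocations (illustrative; the concrete script filename is an assumption):
#   python release.py                  # prepare a minor release
#   python release.py --patch          # prepare a patch release
#   python release.py --post_release   # bump to the next .dev0 version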
| 637 | 1 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase__ = '''▁'''
UpperCamelCase__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class __lowercase ( a__ , unittest.TestCase ):
_lowerCAmelCase = BertGenerationTokenizer
_lowerCAmelCase = False
_lowerCAmelCase = True
def __magic_name__ ( self : int ):
super().setUp()
a_ = BertGenerationTokenizer(lowercase__ , keep_accents=lowercase__ )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ ( self : Optional[int] ):
a_ = '''<s>'''
a_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__ ) , lowercase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__ ) , lowercase__ )
def __magic_name__ ( self : Optional[Any] ):
a_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''<pad>''' )
self.assertEqual(len(lowercase__ ) , 1_0_0_2 )
def __magic_name__ ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 )
def __magic_name__ ( self : Dict ):
a_ = BertGenerationTokenizer(lowercase__ , keep_accents=lowercase__ )
a_ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowercase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase__ ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , )
a_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowercase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
a_ = tokenizer.convert_tokens_to_ids(lowercase__ )
self.assertListEqual(
lowercase__ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , )
a_ = tokenizer.convert_ids_to_tokens(lowercase__ )
self.assertListEqual(
lowercase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def __magic_name__ ( self : Dict ):
return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
@slow
def __magic_name__ ( self : Tuple ):
a_ = '''Hello World!'''
a_ = [1_8_5_3_6, 2_2_6_0, 1_0_1]
self.assertListEqual(lowercase__ , self.big_tokenizer.encode(lowercase__ ) )
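# Round-trip note for the ids above (illustration): decoding [18536, 2260, 101] with
# this sentencepiece model should recover "Hello World!" once the ▁ markers are
# handled by the decoder.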
@slow
def __magic_name__ ( self : Optional[Any] ):
a_ = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
a_ = [
8_7_1,
4_1_9,
3_5_8,
9_4_6,
9_9_1,
2_5_2_1,
4_5_2,
3_5_8,
1_3_5_7,
3_8_7,
7_7_5_1,
3_5_3_6,
1_1_2,
9_8_5,
4_5_6,
1_2_6,
8_6_5,
9_3_8,
5_4_0_0,
5_7_3_4,
4_5_8,
1_3_6_8,
4_6_7,
7_8_6,
2_4_6_2,
5_2_4_6,
1_1_5_9,
6_3_3,
8_6_5,
4_5_1_9,
4_5_7,
5_8_2,
8_5_2,
2_5_5_7,
4_2_7,
9_1_6,
5_0_8,
4_0_5,
3_4_3_2_4,
4_9_7,
3_9_1,
4_0_8,
1_1_3_4_2,
1_2_4_4,
3_8_5,
1_0_0,
9_3_8,
9_8_5,
4_5_6,
5_7_4,
3_6_2,
1_2_5_9_7,
3_2_0_0,
3_1_2_9,
1_1_7_2,
]
self.assertListEqual(lowercase__ , self.big_tokenizer.encode(lowercase__ ) )
@require_torch
@slow
def __magic_name__ ( self : Optional[Any] ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
a_ = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
a_ = ''' '''.join(lowercase__ )
a_ = self.big_tokenizer.encode_plus(lowercase__ , return_tensors='''pt''' , return_token_type_ids=lowercase__ )
a_ = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=lowercase__ )
a_ = BertGenerationConfig()
a_ = BertGenerationEncoder(lowercase__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowercase__ )
model(**lowercase__ )
@slow
def __magic_name__ ( self : Optional[Any] ):
# fmt: off
a_ = {'''input_ids''': [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase__ , model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' , revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' , )
| 143 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/nllb-large-en-ro''': 1_024,
'''facebook/nllb-200-distilled-600M''': 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by translation pipeline, to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting.
        - In legacy mode: no prefix, suffix = [eos, src_lang_code].
        - In default mode: prefix = [src_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting.
        - In legacy mode: no prefix, suffix = [eos, tgt_lang_code].
        - In default mode: prefix = [tgt_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.convert_tokens_to_ids(lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
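
# Usage sketch (added for illustration; not part of the original module). It assumes the
# public "facebook/nllb-200-distilled-600M" checkpoint referenced in the maps above and
# needs network access, so it is kept behind a __main__ guard.
if __name__ == "__main__":
    tokenizer = NllbTokenizerFast.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
    )
    encoded = tokenizer("Hello world", return_tensors="pt")
    # In default (non-legacy) mode the encoding is [src_lang_code] tokens [eos]; a
    # translation model is then forced to start generation with the target code:
    forced_bos = tokenizer.convert_tokens_to_ids("fra_Latn")
    print(encoded["input_ids"], forced_bos)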
| 143 | 1 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
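
# Stand-alone sketch of the behavior exercised above (illustrative, not part of the
# original test file): `Accelerator.prepare` wraps the optimizer in accelerate's
# optimizer wrapper, and that wrapper must survive a pickle round-trip, e.g. so
# checkpointing utilities can serialize training state.
if __name__ == "__main__":
    model = torch.nn.Linear(10, 10)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    wrapped = Accelerator().prepare(optimizer)
    restored = pickle.loads(pickle.dumps(wrapped))
    assert type(restored) is type(wrapped)
    AcceleratorState._reset_state()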
| 49 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.num_hidden_layers = self.num_hidden_layers + 2
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_std=self.initializer_std,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

    def create_and_check_base_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
| 196 | 0 |
"""
Project Euler problem 145: https://projecteuler.net/problem=145

A number n is "reversible" when n does not end in 0 and every digit of
n + reverse(n) is odd. Count the reversible numbers below 10**max_power.
"""
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """
    Count the number of reversible numbers of the given length by filling the digit
    slots pairwise (outermost first) and tracking the parity carry in `remainder`.
    """
    if remaining_length == 0:
        # Leading zeros are not allowed on either n or its reverse
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        # Verify that every remaining digit pair of n + reverse(n) is odd
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    """
    >>> solution(3)
    120
    """
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
print(F"{solution() = }")
| 119 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
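
# End-to-end usage sketch (illustrative, not part of the original test file), mirroring
# the setUp values above; loading the checkpoint requires network access, so the example
# is kept behind a __main__ guard.
if __name__ == "__main__":
    processor = BarkProcessor.from_pretrained(
        "ylacombe/bark-small", speaker_embeddings_dict_path="speaker_embeddings_path.json"
    )
    inputs = processor(text="This is a test string", voice_preset="en_speaker_1")
    print(inputs["input_ids"].shape)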
| 119 | 1 |
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    """
    Automatic mask generation for images: the image is cropped and covered with a grid of points, masks are
    predicted per batch of points, and overlapping masks are finally filtered out.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # forward args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        # postprocess args
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        """Generates binary segmentation masks for the given image."""
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 418 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-openqa': (
            'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt',
},
'tokenizer_file': {
'google/realm-cc-news-pretrained-embedder': (
            'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-openqa': (
            'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-nq-openqa': (
'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-nq-reader': (
'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-openqa': (
'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-reader': (
'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/realm-cc-news-pretrained-embedder': 5_12,
'google/realm-cc-news-pretrained-encoder': 5_12,
'google/realm-cc-news-pretrained-scorer': 5_12,
'google/realm-cc-news-pretrained-openqa': 5_12,
'google/realm-orqa-nq-openqa': 5_12,
'google/realm-orqa-nq-reader': 5_12,
'google/realm-orqa-wq-openqa': 5_12,
'google/realm-orqa-wq-reader': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/realm-cc-news-pretrained-embedder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-encoder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-scorer': {'do_lower_case': True},
'google/realm-cc-news-pretrained-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-reader': {'do_lower_case': True},
'google/realm-orqa-wq-openqa': {'do_lower_case': True},
'google/realm-orqa-wq-reader': {'do_lower_case': True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" REALM tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def batch_encode_candidates(self, text, **kwargs):
        r"""Encode a batch of candidate texts (or text pairs), always padding to ``max_length`` so
        that the candidates can later be stacked into a (batch, num_candidates, seq_len) tensor."""
        # Always use a fixed sequence length to encode, in order to stack candidates into a batch.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH

        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
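
# Usage sketch (illustrative, not part of the original module): the candidate-encoding
# helper above pads every candidate to the same length so they stack cleanly. Loading
# the public REALM checkpoint needs network access, hence the __main__ guard.
if __name__ == "__main__":
    tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
    text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
    batch = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="pt")
    print(batch["input_ids"].shape)  # expected: (2, 2, 10)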
| 418 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_nllb_moe": [
"NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
"NllbMoeConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
"NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
"NllbMoeForConditionalGeneration",
"NllbMoeModel",
"NllbMoePreTrainedModel",
"NllbMoeTop2Router",
"NllbMoeSparseMLP",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
A_ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 419 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
"tokenizer_file": {
"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
},
}
A_ : Optional[Any] = {"mobilebert-uncased": 512}
A_ : Optional[Any] = {}
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" MobileBERT tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
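
# Worked example (illustrative, not part of the original module): the two helpers above
# produce the standard BERT-style layout. For token ids A=[5] and B=[6]:
#   build_inputs_with_special_tokens -> [cls_id, 5, sep_id, 6, sep_id]
#   create_token_type_ids_from_sequences -> [0, 0, 0, 1, 1]
# Loading the checkpoint needs network access, hence the __main__ guard.
if __name__ == "__main__":
    tok = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
    ids = tok.build_inputs_with_special_tokens([5], [6])
    types = tok.create_token_type_ids_from_sequences([5], [6])
    assert len(ids) == 5 and types == [0, 0, 0, 1, 1]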
| 419 | 1 |
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        """If you are training a seq2seq model that expects a decoder_prefix token make sure it is prepended to decoder_input_ids"""
        pass
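
# WordPiece walk-through (illustrative addition): with the toy vocab above, the
# tokenizer lowercases and strips the accent, then greedily matches the longest
# vocabulary prefixes:
#   "UNwant\u00E9d" -> "un" + "##want" + "##ed"   (ids 7, 4, 5)
#   ","             -> ","                        (id 10)
#   "running"       -> "runn" + "##ing"           (ids 8, 9)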
| 275 |
"""Fully iterative (bottom-up) merge sort."""
from __future__ import annotations


def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the two sorted runs input_list[low:mid] and input_list[mid:high + 1] in place."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """
    Return a sorted copy of the input list.

    >>> iter_merge_sort([5, 9, 8, 7, 1, 2, 7])
    [1, 2, 5, 7, 7, 8, 9]
    """
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
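
# Worked example (illustrative addition) tracing the bottom-up passes on [5, 3, 8, 1]:
#   p = 2: merge adjacent pairs  -> [3, 5, 1, 8]
#   p = 4: merge the two halves  -> [1, 3, 5, 8]
assert iter_merge_sort([5, 3, 8, 1]) == [1, 3, 5, 8]
assert iter_merge_sort([]) == []
assert iter_merge_sort([7]) == [7]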
| 275 | 1 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
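
    # BPE walk-through (illustrative addition) for "lower" with the toy merges above:
    # start from characters ["l", "o", "w", "e", "r</w>"]; the merge "l o" yields "lo",
    # "lo w</w>" does not apply because "w" is not word-final here, and "e r</w>" yields
    # "er</w>", leaving exactly the expected ["lo", "w", "er</w>"].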
@require_ftfy
def A__ ( self : Tuple ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase__ = self.tokenizer_class.from_pretrained(__lowercase, **__lowercase )
lowercase__ = self.rust_tokenizer_class.from_pretrained(__lowercase, **__lowercase )
lowercase__ = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
lowercase__ = tokenizer_s.tokenize(__lowercase )
lowercase__ = tokenizer_r.tokenize(__lowercase )
self.assertListEqual(__lowercase, __lowercase )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
lowercase__ = "xa\u0303y" + " " + "x\xe3y"
lowercase__ = tokenizer_s.tokenize(__lowercase )
lowercase__ = tokenizer_r.tokenize(__lowercase )
self.assertListEqual(__lowercase, __lowercase )
# Test that the tokenization is identical on unicode of space type
lowercase__ = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
lowercase__ = tokenizer_s.tokenize(__lowercase )
lowercase__ = tokenizer_r.tokenize(__lowercase )
self.assertListEqual(__lowercase, __lowercase )
# Test that the tokenization is identical on unicode of line break type
lowercase__ = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
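                # Illustrative divergence (assuming ftfy behaves as described above):
                #   slow (ftfy):  "\u0085" -> "\u2026" ("…") -> a non-empty token list
                #   fast:         "\u0085" -> " "            -> [] (empty list)
                # which is why "\u0085" is left commented out of `line_break_unicodes`.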
                for unicode_seq in line_break_unicodes:
                    tokens_s = tokenizer_s.tokenize(unicode_seq)
                    tokens_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(tokens_s, tokens_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase__ = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
lowercase__ = F'''{text_of_1_token} {text_of_1_token}'''
lowercase__ = self.rust_tokenizer_class.from_pretrained(
__lowercase, use_fast=__lowercase, )
lowercase__ = tokenizer_r(__lowercase, return_offsets_mapping=__lowercase, add_special_tokens=__lowercase )
self.assertEqual(encoding.offset_mapping[0], (0, len(__lowercase )) )
self.assertEqual(
encoding.offset_mapping[1], (len(__lowercase ) + 1, len(__lowercase ) + 1 + len(__lowercase )), )
lowercase__ = F''' {text}'''
lowercase__ = self.rust_tokenizer_class.from_pretrained(
__lowercase, use_fast=__lowercase, )
lowercase__ = tokenizer_r(__lowercase, return_offsets_mapping=__lowercase, add_special_tokens=__lowercase )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(__lowercase )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(__lowercase ) + 1, 1 + len(__lowercase ) + 1 + len(__lowercase )), )
    def test_load_old_tokenizer_raises(self):
# Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()
    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
| 37 |
def __lowerCAmelCase ( input_str ):
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
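# Illustrative calls (hypothetical inputs; the function returns True only when every
# character in the string has a distinct code point):
#   __lowerCAmelCase("abcd")  # -> True
#   __lowerCAmelCase("abca")  # -> False, the bit for 'a' is already set on the second visit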
if __name__ == "__main__":
import doctest
doctest.testmod()
| 37 | 1 |
import os
def solution( ):
    with open(os.path.dirname(__file__) + '''/p022_names.txt''' ) as file:
        names = str(file.readlines()[0] )
        names = names.replace('''"''' , '''''' ).split(''',''' )
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
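# Worked example from the Project Euler 22 statement: COLIN scores
# 3 + 15 + 12 + 9 + 14 = 53, and as the 938th name in the sorted list it
# contributes 938 * 53 = 49714 to the total.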
if __name__ == "__main__":
print(solution())
| 171 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester :
    def __init__( self , parent , batch_size=3 , image_size=3_2 , num_channels=3 , embeddings_size=1_0 , hidden_sizes=[8, 1_6, 3_2, 6_4] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , num_groups=1 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values , labels=labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def create_and_check_backbone( self , config , pixel_values , labels ):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels) , len(config.out_features))
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps) , 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels) , 1)
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self , config_class=BitConfig , has_text_modality=False)
    def test_config( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
return
@unittest.skip(reason='''Bit does not output attentions''')
def UpperCAmelCase__ ( self : Optional[Any]):
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''')
def UpperCAmelCase__ ( self : Any):
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''')
def UpperCAmelCase__ ( self : Dict):
pass
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names)
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_initialization( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
                    self.assertTrue(
                        torch.all(module.bias == 0) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states) , expected_num_stages + 1)
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['''preactivation''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['''output_hidden_states'''] = True
                check_hidden_states_output(inputs_dict , config , model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class)
@unittest.skip(reason='''Bit does not use feedforward chunking''')
def UpperCAmelCase__ ( self : Any):
pass
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained( self ):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img( ):
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class BitModelIntegrationTest ( unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
)
@slow
    def test_inference_image_classification_head( self ):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4))
@require_torch
class BitBackboneTest ( BackboneTesterMixin , unittest.TestCase ):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False
    def setUp( self ):
        self.model_tester = BitModelTester(self)
| 171 | 1 |
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
"google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
"google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
class OwlViTTextConfig ( PretrainedConfig ):
    model_type = '''owlvit_text_model'''
    def __init__( self , vocab_size=49_408 , hidden_size=512 , intermediate_size=2_048 , num_hidden_layers=12 , num_attention_heads=8 , max_position_embeddings=16 , hidden_act="quick_gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.02 , initializer_factor=1.0 , pad_token_id=0 , bos_token_id=49_406 , eos_token_id=49_407 , **kwargs ,):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type" ) == "owlvit":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class OwlViTVisionConfig ( PretrainedConfig ):
    model_type = '''owlvit_vision_model'''
    def __init__( self , hidden_size=768 , intermediate_size=3_072 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=768 , patch_size=32 , hidden_act="quick_gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.02 , initializer_factor=1.0 , **kwargs ,):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type" ) == "owlvit":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class OwlViTConfig ( PretrainedConfig ):
    model_type = '''owlvit'''
    is_composition = True
    def __init__( self , text_config=None , vision_config=None , projection_dim=512 , logit_scale_init_value=2.6_592 , return_dict=True , **kwargs ,):
        super().__init__(**kwargs )
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." )
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values." )
        self.text_config = OwlViTTextConfig(**text_config )
        self.vision_config = OwlViTVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
    @classmethod
    def from_text_vision_configs( cls , text_config , vision_config , **kwargs ):
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config
        return cls.from_dict(config_dict , **kwargs )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class OwlViTOnnxConfig ( OnnxConfig ):
    @property
    def inputs( self ):
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("attention_mask", {0: "batch", 1: "sequence"}),
] )
    @property
    def outputs( self ):
return OrderedDict(
[
("logits_per_image", {0: "batch"}),
("logits_per_text", {0: "batch"}),
("text_embeds", {0: "batch"}),
("image_embeds", {0: "batch"}),
] )
    @property
    def atol_for_validation( self ):
return 1e-4
def __UpperCamelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : "ProcessorMixin" ,SCREAMING_SNAKE_CASE__ : int = -1 ,SCREAMING_SNAKE_CASE__ : int = -1 ,SCREAMING_SNAKE_CASE__ : Optional["TensorType"] = None ,):
SCREAMING_SNAKE_CASE:List[Any] = super().generate_dummy_inputs(
processor.tokenizer ,batch_size=SCREAMING_SNAKE_CASE__ ,seq_length=SCREAMING_SNAKE_CASE__ ,framework=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:List[Any] = super().generate_dummy_inputs(
processor.image_processor ,batch_size=SCREAMING_SNAKE_CASE__ ,framework=SCREAMING_SNAKE_CASE__ )
return {**text_input_dict, **image_input_dict}
    @property
    def default_onnx_opset( self ):
return 14
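# Usage sketch (illustrative values; `from_text_vision_configs` expects plain config dicts):
# text_config = OwlViTTextConfig(vocab_size=49_408)
# vision_config = OwlViTVisionConfig(patch_size=32)
# config = OwlViTConfig.from_text_vision_configs(text_config.to_dict(), vision_config.to_dict())
# assert config.text_config.vocab_size == 49_408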
| 702 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ClapFeatureExtractor ( SequenceFeatureExtractor ):
    model_input_names = ['''input_features''', '''is_longer''']
    def __init__( self , feature_size=64 , sampling_rate=48_000 , hop_length=480 , max_length_s=10 , fft_window_size=1_024 , padding_value=0.0 , return_attention_mask=False , frequency_min: float = 0 , frequency_max: float = 14_000 , top_db: int = None , truncation: str = "fusion" , padding: str = "repeatpad" , **kwargs ,):
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , return_attention_mask=return_attention_mask , **kwargs ,)
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=feature_size , min_frequency=frequency_min , max_frequency=frequency_max , sampling_rate=sampling_rate , norm=None , mel_scale="htk" ,)
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=feature_size , min_frequency=frequency_min , max_frequency=frequency_max , sampling_rate=sampling_rate , norm="slaney" , mel_scale="slaney" ,)
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def _np_extract_fbank_features( self , waveform: np.array , mel_filters: Optional[np.array] = None ):
        log_mel_spectrogram = spectrogram(
            waveform , window_function(self.fft_window_size , "hann" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=mel_filters , log_mel="dB" ,)
        return log_mel_spectrogram.T
    def _random_mel_fusion( self , mel , total_frames , chunk_frames ):
        ranges = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
        if len(ranges[1] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0] )
        idx_middle = np.random.choice(ranges[1] )
        idx_back = np.random.choice(ranges[2] )
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel_shrink = torch.tensor(mel[None, None, :] )
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink , size=[chunk_frames, 64] , mode="bilinear" , align_corners=False )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
        return mel_fusion
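    # Note on the fusion above: the returned array stacks four views of the log-mel
    # spectrogram -- one globally downsampled copy plus one random crop from the
    # front, middle and back thirds of the clip -- giving shape (4, chunk_frames, 64).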
    def _get_input_mel( self , waveform: np.array , max_length , truncation , padding ):
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform ) - max_length
                idx = np.random.randint(0 , overflow + 1 )
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters_slaney )[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform , self.mel_filters )
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel] , axis=0 )
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel , total_frames , chunk_frames )
                    longer = True
            else:
                raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform , n_repeat + 1 ) )[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform , n_repeat ) )
                waveform = np.pad(waveform , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0 )
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters )
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
            else:
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters_slaney )[None, :]
        return input_mel, longer
    def __call__( self , raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , truncation: str = None , padding: Optional[str] = None , max_length: Optional[int] = None , sampling_rate: Optional[int] = None , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs ,):
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray(speech , dtype=np.float64 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float64 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float64 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech )]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform , max_length if max_length else self.nb_max_samples , truncation , padding )
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel )
            is_longer.append(longer )
        if truncation == "fusion" and sum(is_longer ) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0 , len(input_mel ) )
            is_longer[rand_idx] = True
        if isinstance(input_mel[0] , list ):
            input_mel = [np.asarray(feature , dtype=np.float64 ) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features )
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors )
        return input_features
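# Usage sketch (hypothetical silent 10-second clip; 48 kHz is the extractor's default rate):
# import numpy as np
# feature_extractor = ClapFeatureExtractor()
# waveform = np.zeros(48_000 * 10, dtype=np.float64)
# features = feature_extractor(waveform, sampling_rate=48_000, return_tensors="np")
# # with the default "fusion" truncation each clip yields 4 stacked mel views:
# # features["input_features"].shape == (1, 4, n_frames, 64)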
| 465 | 0 |
"""simple docstring"""
def base16_encode( data: bytes ) ->str:
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )
def base16_decode( data: str ) ->bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data ) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set("0123456789ABCDEF" ):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 1_6 ) for i in range(0 , len(data ) , 2 ) )
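# Round-trip example (values computed by hand for illustration):
#   base16_encode(b"Hello")      -> "48656C6C6F"
#   base16_decode("48656C6C6F")  -> b"Hello"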
if __name__ == "__main__":
import doctest
doctest.testmod()
| 118 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)
CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class CodeGenConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = """codegen"""
    attribute_map = {
        """max_position_embeddings""": """n_positions""",
        """hidden_size""": """n_embd""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
    def __init__( self , vocab_size=50_400 , n_positions=2_048 , n_ctx=2_048 , n_embd=4_096 , n_layer=28 , n_head=16 , rotary_dim=64 , n_inner=None , activation_function="gelu_new" , resid_pdrop=0.0 , embd_pdrop=0.0 , attn_pdrop=0.0 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=50_256 , eos_token_id=50_256 , tie_word_embeddings=False , **kwargs , ) -> Union[str, Any]:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs )
class CodeGenOnnxConfig ( OnnxConfigWithPast ):
    '''simple docstring'''
    def __init__( self , config , task = "default" , patching_specs = None , use_past = False , ) -> str:
        '''simple docstring'''
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , "pad_token_id" , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="inputs" )
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs
    @property
    def num_layers( self ) -> int:
        '''simple docstring'''
        return self._config.n_layer
    @property
    def num_attention_heads( self ) -> int:
        '''simple docstring'''
        return self._config.n_head
    def generate_dummy_inputs( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ) -> Mapping[str, Any]:
        '''simple docstring'''
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
@property
    def default_onnx_opset( self ) -> int:
'''simple docstring'''
return 13
| 118 | 1 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def snake_case__ ( __lowercase = "isbn/0140328726" ) -> dict:
"""simple docstring"""
A__ : Any = olid.strip().strip("/" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("/" ) != 1:
A__ : str = F'{olid} is not a valid Open Library olid'
raise ValueError(__lowercase )
return requests.get(F'https://openlibrary.org/{new_olid}.json' ).json()
def summarize_book( ol_book_data ) -> dict:
    """simple docstring"""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"] )["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value , list ):
            data[key] = ", ".join(value )
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
    isbn = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (1_0, 1_3) or not isbn.isdigit():
print(f"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(f"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
        book_summary = summarize_book(get_openlibrary_data(f"""isbn/{isbn}"""))
print('\n'.join(f"""{key}: {value}""" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f"""Sorry, there are no results for ISBN: {isbn}.""") | 182 |
from __future__ import annotations
graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph :
    def __init__( self , graph : dict[str, list[str]] , source_vertex : str ):
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent : dict[str, str | None] = {}
        self.source_vertex = source_vertex
    def breath_first_search( self ):
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0 )
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex )
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex )
    def shortest_path( self , target_vertex : str ):
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex )
        if target_vertex_parent is None:
            msg = (
                F'No path from vertex: {self.source_vertex} to vertex: {target_vertex}'
            )
            raise ValueError(msg )
        return self.shortest_path(target_vertex_parent ) + F'->{target_vertex}'
if __name__ == "__main__":
    g = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo')) | 182 | 1 |
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs( gen_kwargs ) -> int:
    """simple docstring"""
    lists_lengths = {key: len(value ) for key, value in gen_kwargs.items() if isinstance(value , list )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
"Sharding is ambiguous for this dataset: "
+ "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
+ "\n".join(F"""\t- key {key} has length {length}""" for key, length in lists_lengths.items() )
+ "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
+ "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
) )
    max_length = max(lists_lengths.values() , default=0 )
    return max(1 , max_length )
def _distribute_shards( num_shards , max_num_jobs ) -> List[range]:
    """simple docstring"""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs ):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start , start + num_shards_to_add )
        shards_indices_per_group.append(shard_indices )
return shards_indices_per_group
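# Worked example: splitting 5 shards over at most 2 jobs gives
#   _distribute_shards(num_shards=5, max_num_jobs=2) == [range(0, 3), range(3, 5)]
# i.e. earlier groups absorb the remainder shards, so group sizes differ by at most one.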
def _split_gen_kwargs( gen_kwargs , max_num_jobs ) -> List[dict]:
    """simple docstring"""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs )
    if num_shards == 1:
        return [dict(gen_kwargs )]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards , max_num_jobs=max_num_jobs )
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value , list )
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group ) )
        ]
def _merge_gen_kwargs( gen_kwargs_list ) -> dict:
    """simple docstring"""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key] , list )
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def _shuffle_gen_kwargs( rng , gen_kwargs ) -> dict:
    """simple docstring"""
    list_sizes = {len(value ) for value in gen_kwargs.values() if isinstance(value , list )}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size ) )
        rng.shuffle(indices_per_size[size] )
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs )
    for key, value in shuffled_kwargs.items():
        if isinstance(value , list ):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value )]]
    return shuffled_kwargs
| 59 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast ( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer
    def __init__(self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) ->str:
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase" , do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None) ->Dict:
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None) ->List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None) ->Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix)
        return tuple(files)
| 59 | 1 |
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(UpperCamelCase_ )
class MaskGenerationPipeline ( ChunkPipeline ):
"""simple docstring"""
    def __init__( self , **kwargs )-> Optional[Any]:
        super().__init__(**kwargs )
        requires_backends(self , """vision""" )
        requires_backends(self , """torch""" )
        if self.framework != "pt":
            raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING )
    def _sanitize_parameters( self , **kwargs ):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs['points_per_batch']
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs['points_per_crop']
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs['crops_n_layers']
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs['crop_overlap_ratio']
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs['crop_n_points_downscale_factor']
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs['pred_iou_thresh']
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs['stability_score_offset']
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs['mask_threshold']
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs['stability_score_thresh']
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs['crops_nms_thresh']
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs['output_rle_mask']
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs['output_bboxes_mask']
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__( self , image , *args , num_workers=None , batch_size=None , **kwargs )-> Dict:
        return super().__call__(image , *args , num_workers=num_workers , batch_size=batch_size , **kwargs )
    def preprocess( self , image , points_per_batch=64 , crops_n_layers = 0 , crop_overlap_ratio = 5_12 / 15_00 , points_per_crop = 32 , crop_n_points_downscale_factor = 1 , )-> List[Any]:
        image = load_image(image )
        target_size = self.image_processor.size['longest_edge']
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image , target_size , crops_n_layers , crop_overlap_ratio , points_per_crop , crop_n_points_downscale_factor )
        model_inputs = self.image_processor(images=cropped_images , return_tensors="""pt""" )
        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs , device=self.device )
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
                    model_inputs["""image_embeddings"""] = image_embeddings
        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                """Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
                """To return all points at once, set points_per_batch to None""" )
        for i in range(0 , n_points , points_per_batch ):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward( self , model_inputs , pred_iou_thresh=0.88 , stability_score_thresh=0.95 , mask_threshold=0 , stability_score_offset=1 , )-> Optional[int]:
        input_boxes = model_inputs.pop("""input_boxes""" )
        is_last = model_inputs.pop("""is_last""" )
        original_sizes = model_inputs.pop("""original_sizes""" ).tolist()
        reshaped_input_sizes = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
        model_outputs = self.model(**model_inputs )
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs['pred_masks']
        masks = self.image_processor.post_process_masks(
            low_resolution_masks , original_sizes , reshaped_input_sizes , mask_threshold , binarize=False )
        iou_scores = model_outputs['iou_scores']
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , pred_iou_thresh , stability_score_thresh , mask_threshold , stability_score_offset , )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def postprocess( self , model_outputs , output_rle_mask=False , output_bboxes_mask=False , crops_nms_thresh=0.7 , )-> List[str]:
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("""iou_scores""" ) )
            all_masks.extend(model_output.pop("""masks""" ) )
            all_boxes.append(model_output.pop("""boxes""" ) )
        all_scores = torch.cat(all_scores )
        all_boxes = torch.cat(all_boxes )
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks , all_scores , all_boxes , crops_nms_thresh )
        extra = defaultdict(list )
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v )
        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 717 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
_SCREAMING_SNAKE_CASE = "docs/source/en/_toctree.yml"
def clean_model_doc_toc( model_doc ) -> Optional[int]:
    counts = defaultdict(int )
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["""title"""] for doc in model_doc if doc["""local"""] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                F'''{duplicate_key} is present several times in the documentation table of content at '''
                """`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
                """others.""" )
        # Only add this once
        new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["""local"""]] == 1] )
    # Sort
    return sorted(new_doc , key=lambda s : s["title"].lower() )
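# For example, two entries {"local": "model_doc/bert", "title": "BERT"} collapse into one;
# duplicates whose titles *differ* raise instead of guessing which title to keep.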
def check_model_doc( overwrite=False ) -> Optional[Any]:
    with open(PATH_TO_TOC , encoding="""utf-8""" ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["""sections"""]
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["""sections"""]
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc ) if """sections""" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["""sections"""]
        new_modality_doc = clean_model_doc_toc(old_modality_doc )
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["""sections"""] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]["""sections"""] = model_doc
            content[api_idx]["""sections"""] = api_doc
            with open(PATH_TO_TOC , """w""" , encoding="""utf-8""" ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
_SCREAMING_SNAKE_CASE = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
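# A worked example of the deduplication in clean_model_doc_toc above, on toy
# entries (hedged: hypothetical data, not taken from the real toctree).
toy_doc = [
    {"local": "model_doc/bert", "title": "BERT"},
    {"local": "model_doc/bert", "title": "BERT"},
    {"local": "model_doc/albert", "title": "ALBERT"},
]
assert clean_model_doc_toc(toy_doc) == [
    {"local": "model_doc/albert", "title": "ALBERT"},
    {"local": "model_doc/bert", "title": "BERT"},
]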
| 517 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """tanreinama/GPTSAN-2.8B-spout_is_uniform""": (
        """https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"""
    ),
}
class GPTSanJapaneseConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = """gptsan-japanese"""
    keys_to_ignore_at_inference = [
        """past_key_values""",
    ]
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }
    def __init__( self , vocab_size=3_60_00 , max_position_embeddings=12_80 , d_model=10_24 , d_ff=81_92 , d_ext=40_96 , d_spout=1_28 , num_switch_layers=10 , num_ext_layers=0 , num_heads=16 , num_experts=16 , expert_capacity=1_28 , dropout_rate=0.0 , layer_norm_epsilon=1E-5 , router_bias=False , router_jitter_noise=0.0 , router_dtype="float32" , router_ignore_padding_tokens=False , output_hidden_states=False , output_attentions=False , initializer_factor=0.002 , output_router_logits=False , use_cache=True , separator_token_id=3_59_98 , pad_token_id=3_59_95 , eos_token_id=3_59_99 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache
        super().__init__(
            separator_token_id=separator_token_id , pad_token_id=pad_token_id , eos_token_id=eos_token_id , **kwargs , )
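# A minimal instantiation sketch (hedged: illustrative only; the values come
# from the defaults in the signature above).
gptsan_config = GPTSanJapaneseConfig()
print(gptsan_config.d_model, gptsan_config.num_heads)  # 1024 16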
| 100 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class lowerCamelCase ( object ):
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=6_4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , q_groups=2 , k_groups=2 , v_groups=2 , post_attention_groups=2 , intermediate_groups=4 , output_groups=1 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        """simple docstring"""
        return SqueezeBertConfig(
            embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
    def create_and_check_squeezebert_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = SqueezeBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_squeezebert_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = SqueezeBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_squeezebert_for_question_answering( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = SqueezeBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , start_positions=sequence_labels , end_positions=sequence_labels )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_squeezebert_for_sequence_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_squeezebert_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_squeezebert_for_multiple_choice( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class lowerCamelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
    pipeline_model_mapping = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = SqueezeBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=SqueezeBertConfig , dim=3_7 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_squeezebert_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs )
    def test_for_question_answering( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs )
    def test_for_multiple_choice( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
    def test_sentence_classification( self ):
        """simple docstring"""
        model = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' )
        input_ids = torch.tensor([[1, 2_9_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 1_3, 1_5_8_8, 2]] )
        output = model(input_ids )[0]
        expected_shape = torch.Size((1, 3) )
        self.assertEqual(output.shape , expected_shape )
        expected_tensor = torch.tensor([[0.64_01, -0.03_49, -0.60_41]] )
        self.assertTrue(torch.allclose(output , expected_tensor , atol=1e-4 ) )
| 201 | 0 |
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__UpperCAmelCase = """\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
__UpperCAmelCase = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
__UpperCAmelCase = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
"""
def simple_accuracy(preds , labels):
    '''simple docstring'''
    return float((preds == labels).mean() )
def acc_and_f1(preds , labels , f1_avg="binary"):
    '''simple docstring'''
    acc = simple_accuracy(preds , labels )
    f1 = float(f1_score(y_true=labels , y_pred=preds , average=f1_avg ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc(ids_preds , labels):
    '''simple docstring'''
    question_map = {}
    for id_pred, label in zip(ids_preds , labels ):
        question_id = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label) )
        else:
            question_map[question_id] = [(pred, label)]
    f1s , ems = [], []
    for question, preds_labels in question_map.items():
        question_preds , question_labels = zip(*preds_labels )
        f1 = f1_score(y_true=question_labels , y_pred=question_preds , average="macro" )
        f1s.append(f1 )
        em = int(sum(pred == label for pred, label in preds_labels ) == len(preds_labels ) )
        ems.append(em )
    f1_m = float(sum(f1s ) / len(f1s ) )
    em = sum(ems ) / len(ems )
    f1_a = float(f1_score(y_true=labels , y_pred=[id_pred["prediction"] for id_pred in ids_preds] ) )
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric ):
'''simple docstring'''
    def _info( self ):
"""simple docstring"""
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]")
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types()) , codebase_urls=[] , reference_urls=[] , format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None , )
    def _get_feature_types( self ):
"""simple docstring"""
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("int64"),
"query": datasets.Value("int64"),
},
"prediction_text": datasets.Value("string"),
},
"references": {
"idx": {
"passage": datasets.Value("int64"),
"query": datasets.Value("int64"),
},
"answers": datasets.Sequence(datasets.Value("string")),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("int64"),
"paragraph": datasets.Value("int64"),
"question": datasets.Value("int64"),
},
"prediction": datasets.Value("int64"),
},
"references": datasets.Value("int64"),
}
else:
return {
"predictions": datasets.Value("int64"),
"references": datasets.Value("int64"),
}
    def _compute( self , predictions , references ):
        """simple docstring"""
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references , predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions , references , f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset , predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions , references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions , references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                "[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]")
| 719 |
from __future__ import annotations
def pigeon_sort( array : list[int] ) -> list[int]:
    '''simple docstring'''
    if len(array ) == 0:
        return array
    _min , _max = min(array ), max(array )
    # Compute the variables
    holes_range = _max - _min + 1
    holes , holes_repeat = [0] * holes_range, [0] * holes_range
    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range ):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Returns the sorted array.
    return array
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("""Enter numbers separated by comma:\n""")
    unsorted = [int(x) for x in user_input.split(""",""")]
print(pigeon_sort(unsorted))
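    # Complexity note and worked example (hedged, illustrative): pigeonhole sort
    # runs in O(n + range) time with O(range) extra space, so it suits dense
    # integer keys, e.g. pigeon_sort([8, 3, 2, 7, 4, 6, 8]) -> [2, 3, 4, 6, 7, 8, 8].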
| 582 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class a ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_xlm_roberta_base( self ):
        model = XLMRobertaModel.from_pretrained("""xlm-roberta-base""" )
        input_ids = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 7_68) )  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids )["""last_hidden_state"""].detach()
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1E-3 ) )
    @slow
    def test_xlm_roberta_large( self ):
        model = XLMRobertaModel.from_pretrained("""xlm-roberta-large""" )
        input_ids = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 10_24) )  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids )["""last_hidden_state"""].detach()
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1E-3 ) )
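    # Provenance of the hard-coded ids above (hedged sketch; requires the
    # sentencepiece extra and downloads the tokenizer):
    # from transformers import XLMRobertaTokenizer
    # tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    # tokenizer("The dog is cute and lives in the garden house").input_ids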
| 401 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_inverse_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    'vocab_size': len(tokenizer),
    'scale_attn_by_inverse_layer_idx': True,
    'reorder_and_upcast_attn': True,
}
# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
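# Typical invocation (hedged sketch: the script filename is hypothetical; the
# flag names follow the fields read from `args` above):
# python initialize_model.py --config_name gpt2-large \
#     --tokenizer_name codeparrot/codeparrot --model_name codeparrot-model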
| 401 | 1 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path , targets):
    '''Extract warnings from a downloaded artifact (in .zip format)'''
    selected_warnings = set()
    buffer = []
    def parse_line(fp ):
        for line in fp:
            if isinstance(line , bytes ):
                line = line.decode('''UTF-8''' )
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(''' ''' ):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer ) > 0:
                    warning = '\n'.join(buffer )
                    # Only keep the warnings specified in `targets`
                    if any(f': {x}: ' in warning for x in targets ):
                        selected_warnings.add(warning )
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line )
    if from_gh:
        for filename in os.listdir(artifact_path ):
            file_path = os.path.join(artifact_path , filename )
            if not os.path.isdir(file_path ):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path ) as fp:
                    parse_line(fp )
    else:
        try:
            with zipfile.ZipFile(artifact_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename ) as fp:
                            parse_line(fp )
        except Exception:
            logger.warning(
                f'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.' )
    return selected_warnings
def extract_warnings(artifact_dir , targets):
    '''Extract warnings from all artifact files'''
    selected_warnings = set()
    paths = [os.path.join(artifact_dir , p ) for p in os.listdir(artifact_dir ) if (p.endswith('''.zip''' ) or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p , targets ) )
    return selected_warnings
if __name__ == "__main__":
    def list_str(values ):
        '''simple docstring'''
        return values.split(''',''' )
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
# optional parameters
parser.add_argument(
"--targets",
default="DeprecationWarning,UserWarning,FutureWarning",
type=list_str,
help="Comma-separated list of target warning(s) which we want to extract.",
)
parser.add_argument(
"--from_gh",
action="store_true",
help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("=" * 8_0)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
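    # Typical invocation (hedged sketch: the script filename is hypothetical;
    # the flags come from the argparse definitions above):
    # python extract_warnings.py --workflow_run_id 123456789 \
    #     --output_dir warnings_out --token <GITHUB_TOKEN> \
    #     --targets DeprecationWarning,UserWarning,FutureWarning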
| 704 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_output_embeds_base_model( self ):
        '''simple docstring'''
        model = TFCamembertModel.from_pretrained('''jplu/tf-camembert-base''' )
        input_ids = tf.convert_to_tensor(
            [[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.int32 , )  # "J'aime le camembert !"
        output = model(input_ids )['''last_hidden_state''']
        expected_shape = tf.TensorShape((1, 10, 7_68) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0_2_5_4, 0.0_2_3_5, 0.1_0_2_7], [0.0_6_0_6, -0.1_8_1_1, -0.0_4_1_8], [-0.1_5_6_1, -0.1_1_2_7, 0.2_6_8_7]]] , dtype=tf.float32 , )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 241 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def ucal( u : float, p : int ) -> float:
    '''simple docstring'''
    temp = u
    for i in range(1, p ):
        temp = temp * (u - i)
    return temp
def main() -> None:
    '''simple docstring'''
    n = int(input('''enter the numbers of values: ''' ) )
    y : list[list[float]] = []
    for _ in range(n ):
        y.append([] )
    for i in range(n ):
        for j in range(n ):
            y[i].append(j )
            y[i][j] = 0
    print('''enter the values of parameters in a list: ''' )
    x = list(map(int, input().split() ) )
    print('''enter the values of corresponding parameters: ''' )
    for i in range(n ):
        y[i][0] = float(input() )
    value = int(input('''enter the value to interpolate: ''' ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n ):
        summ += (ucal(u, i ) * y[0][i]) / math.factorial(i )
    print(F'''the value at {value} is {summ}''' )
if __name__ == "__main__":
main()
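# The loops above implement Newton's forward-difference interpolation (hedged
# math note): with u = (value - x[0]) / (x[1] - x[0]) and the i-th forward
# difference of y0 stored in y[0][i],
#   P(value) = y[0][0] + sum over i >= 1 of ucal(u, i) * y[0][i] / i!
# where ucal(u, i) is the falling product u*(u-1)*...*(u-i+1).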
| 83 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool( PipelineTool ):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ["audio"]
    outputs = ["text"]
    def encode( self, audio ):
        return self.pre_processor(audio , return_tensors='pt' ).input_features
    def forward( self, inputs ):
        return self.model.generate(inputs=inputs )
    def decode( self, outputs ):
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0]
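# A minimal usage sketch (hedged: the audio path is hypothetical; calling the
# tool downloads the default Whisper checkpoint on first use):
# transcriber = SpeechToTextTool()
# print(transcriber("path/to/audio.wav"))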
| 328 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __UpperCamelCase ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        scheduler = DDIMScheduler()
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        generator = torch.manual_seed(seed )
        inputs = {
            '''prompt''': '''a photo of the dolomites''',
            '''generator''': generator,
            # Setting height and width to None to prevent OOMs on CPU.
            '''height''': None,
            '''width''': None,
            '''num_inference_steps''': 1,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_stable_diffusion_panorama_default_case( self ):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_inference_batch_consistent( self ):
        super().test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical( self ):
        super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5E-3 )
    def test_stable_diffusion_panorama_negative_prompt( self ):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        negative_prompt = '''french fries'''
        output = sd_pipe(**inputs , negative_prompt=negative_prompt )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_stable_diffusion_panorama_views_batch( self ):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        output = sd_pipe(**inputs , view_batch_size=2 )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_stable_diffusion_panorama_euler( self ):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['''scheduler'''] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
        sd_pipe = StableDiffusionPanoramaPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_stable_diffusion_panorama_pndm( self ):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['''scheduler'''] = PNDMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , skip_prk_steps=True )
        sd_pipe = StableDiffusionPanoramaPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs( self , seed=0 ):
        generator = torch.manual_seed(seed )
        inputs = {
            '''prompt''': '''a photo of the dolomites''',
            '''generator''': generator,
            '''num_inference_steps''': 3,
            '''guidance_scale''': 7.5,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_stable_diffusion_panorama_default( self ):
        model_ckpt = '''stabilityai/stable-diffusion-2-base'''
        scheduler = DDIMScheduler.from_pretrained(model_ckpt , subfolder='''scheduler''' )
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt , scheduler=scheduler , safety_checker=None )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_12, 20_48, 3)
        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ] )
        assert np.abs(expected_slice - image_slice ).max() < 1E-2
    def test_stable_diffusion_panorama_k_lms( self ):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            '''stabilityai/stable-diffusion-2-base''' , safety_checker=None )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_12, 20_48, 3)
        expected_slice = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ] )
        assert np.abs(expected_slice - image_slice ).max() < 1E-3
    def test_stable_diffusion_panorama_intermediate_state( self ):
        number_of_steps = 0
        def callback_fn(step , timestep , latents ) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 2_56)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 2_56)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
        callback_fn.has_been_called = False
        model_ckpt = '''stabilityai/stable-diffusion-2-base'''
        scheduler = DDIMScheduler.from_pretrained(model_ckpt , subfolder='''scheduler''' )
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt , scheduler=scheduler , safety_checker=None )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        pipe(**inputs , callback=callback_fn , callback_steps=1 )
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading( self ):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        model_ckpt = '''stabilityai/stable-diffusion-2-base'''
        scheduler = DDIMScheduler.from_pretrained(model_ckpt , subfolder='''scheduler''' )
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt , scheduler=scheduler , safety_checker=None )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        inputs = self.get_inputs()
        _ = pipe(**inputs )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
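# A usage sketch distilled from the slow tests above (hedged: the checkpoint
# and scheduler follow those tests; running this needs a CUDA GPU and
# downloads the weights):
# from diffusers import DDIMScheduler, StableDiffusionPanoramaPipeline
# model_ckpt = "stabilityai/stable-diffusion-2-base"
# scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
# pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None).to("cuda")
# image = pipe("a photo of the dolomites").images[0]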
| 713 |
from collections.abc import Sequence
def max_subarray_sum( arr: Sequence[float] , allow_empty_subarrays: bool = False ) -> float:
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float('''-inf''' )
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num , curr_sum + num )
        max_sum = max(max_sum , curr_sum )
    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'''{max_subarray_sum(nums) = }''')
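    # Worked check (hedged, illustrative): the maximum subarray of the list
    # above is [4, -1, 2, 1], so this O(n) scan (Kadane's algorithm) reports 6.
    assert max_subarray_sum(nums) == 6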
| 33 | 0 |
'''simple docstring'''
def encrypt(input_string: str , key: int) -> str:
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError('Height of grid can\'t be 0 or negative')
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = [''.join(row) for row in temp_grid]
    output_string = ''.join(grid)
    return output_string
def decrypt(input_string: str , key: int) -> str:
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError('Height of grid can\'t be 0 or negative')
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append('*')
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)
    output_string = ''  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string
def bruteforce(input_string: str) -> dict[int, str]:
    results = {}
    for key_guess in range(1 , len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string , key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
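    # Canonical round-trip check (hedged, illustrative): the classic rail-fence
    # example with 3 rails.
    assert encrypt('WEAREDISCOVEREDFLEEATONCE', 3) == 'WECRLTEERDSOEEFEAOCAIVDEN'
    assert decrypt('WECRLTEERDSOEEFEAOCAIVDEN', 3) == 'WEAREDISCOVEREDFLEEATONCE'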
| 596 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger()
@dataclass
class __lowercase :
    module : nn.Module
    traced : List[nn.Module] = field(default_factory=list )
    handles : list = field(default_factory=list )
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tensor , UpperCAmelCase_ : Tensor):
UpperCamelCase__ : str = len(list(m.modules())) == 1 or isinstance(UpperCAmelCase_ , nn.Convad) or isinstance(UpperCAmelCase_ , nn.BatchNormad)
if has_not_submodules:
self.traced.append(UpperCAmelCase_)
def __call__( self : List[str] , UpperCAmelCase_ : Tensor):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook))
self.module(UpperCAmelCase_)
[x.remove() for x in self.handles]
return self
@property
def __UpperCamelCase ( self : Union[str, Any]):
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda UpperCAmelCase_: len(list(x.state_dict().keys())) > 0 , self.traced))
@dataclass
class __lowercase :
    src : nn.Module
    dest : nn.Module
    verbose : int = 1
    src_skip : list = field(default_factory=list )
    dest_skip : list = field(default_factory=list )
    raise_if_mismatch : bool = True
def __call__( self : Optional[Any] , UpperCAmelCase_ : Tensor):
UpperCamelCase__ : int = Tracker(self.dest)(UpperCAmelCase_).parametrized
UpperCamelCase__ : Any = Tracker(self.src)(UpperCAmelCase_).parametrized
UpperCamelCase__ : Any = list(filter(lambda UpperCAmelCase_: type(UpperCAmelCase_) not in self.src_skip , UpperCAmelCase_))
UpperCamelCase__ : Optional[int] = list(filter(lambda UpperCAmelCase_: type(UpperCAmelCase_) not in self.dest_skip , UpperCAmelCase_))
if len(UpperCAmelCase_) != len(UpperCAmelCase_) and self.raise_if_mismatch:
raise Exception(
F'Numbers of operations are different. Source module has {len(UpperCAmelCase_)} operations while'
F' destination module has {len(UpperCAmelCase_)}.')
for dest_m, src_m in zip(UpperCAmelCase_ , UpperCAmelCase_):
dest_m.load_state_dict(src_m.state_dict())
if self.verbose == 1:
print(F'Transfered from={src_m} to={dest_m}')
class __lowercase (nn.Module ):
def __init__( self : Any , UpperCAmelCase_ : nn.Module):
super().__init__()
UpperCamelCase__ : List[Tuple[str, nn.Module]] = []
# - get the stem
feature_blocks.append(('conv1', model.stem))
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith('block'), F'Unexpected layer name {k}'
UpperCamelCase__ : Optional[Any] = len(UpperCAmelCase_) + 1
feature_blocks.append((F'res{block_index}', v))
UpperCamelCase__ : Any = nn.ModuleDict(UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : Tensor):
return get_trunk_forward_outputs(
UpperCAmelCase_ , out_feat_keys=UpperCAmelCase_ , feature_blocks=self._feature_blocks , )
class __lowercase (dict ):
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : str):
UpperCamelCase__ : int = x.split('-')
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])
def __getitem__( self : Optional[Any] , UpperCAmelCase_ : str):
# default to timm!
if x not in self:
UpperCamelCase__ : List[Any] = self.convert_name_to_timm(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = partial(lambda: (timm.create_model(UpperCAmelCase_ , pretrained=UpperCAmelCase_).eval(), None))
else:
UpperCamelCase__ : List[str] = super().__getitem__(UpperCAmelCase_)
return val
class __lowercase (dict ):
def __getitem__( self : Tuple , UpperCAmelCase_ : str):
if "seer" in x and "in1k" not in x:
UpperCamelCase__ : Optional[Any] = RegNetModel
else:
UpperCamelCase__ : Optional[Any] = RegNetForImageClassification
return val
def manually_copy_vissl_head(from_state_dict , to_state_dict , keys) -> Union[str, Any]:
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f'Copied key={from_key} to={to_key}')
    return to_state_dict
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = True , ) -> List[Any]:
print(f'Converting {name}...')
with torch.no_grad():
UpperCamelCase__, UpperCamelCase__ : Any = from_model_func()
UpperCamelCase__ : int = our_model_func(lowerCamelCase_).eval()
UpperCamelCase__ : Union[str, Any] = ModuleTransfer(src=lowerCamelCase_ , dest=lowerCamelCase_ , raise_if_mismatch=lowerCamelCase_)
UpperCamelCase__ : Dict = torch.randn((1, 3, 224, 224))
module_transfer(lowerCamelCase_)
if from_state_dict is not None:
UpperCamelCase__ : Any = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
UpperCamelCase__ : Optional[int] = [('0.clf.0.weight', 'classifier.1.weight'), ('0.clf.0.bias', 'classifier.1.bias')]
UpperCamelCase__ : Optional[Any] = manually_copy_vissl_head(lowerCamelCase_ , our_model.state_dict() , lowerCamelCase_)
our_model.load_state_dict(lowerCamelCase_)
UpperCamelCase__ : Optional[Any] = our_model(lowerCamelCase_ , output_hidden_states=lowerCamelCase_)
UpperCamelCase__ : Dict = (
our_outputs.logits if isinstance(lowerCamelCase_ , lowerCamelCase_) else our_outputs.last_hidden_state
)
UpperCamelCase__ : Optional[int] = from_model(lowerCamelCase_)
UpperCamelCase__ : Optional[int] = from_output[-1] if type(lowerCamelCase_) is list else from_output
# now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
UpperCamelCase__ : Any = our_outputs.hidden_states[-1]
assert torch.allclose(lowerCamelCase_ , lowerCamelCase_), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='Add model' , use_temp_dir=lowerCamelCase_ , )
UpperCamelCase__ : Tuple = 224 if 'seer' not in name else 384
# we can use the convnext one
UpperCamelCase__ : int = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' , size=lowerCamelCase_)
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='Add image processor' , use_temp_dir=lowerCamelCase_ , )
print(f'Pushed {name}')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = True) -> int:
UpperCamelCase__ : Any = 'imagenet-1k-id2label.json'
UpperCamelCase__ : int = 1_000
UpperCamelCase__ : Tuple = (1, num_labels)
UpperCamelCase__ : Dict = 'huggingface/label-files'
UpperCamelCase__ : str = num_labels
UpperCamelCase__ : Optional[int] = json.load(open(cached_download(hf_hub_url(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset')) , 'r'))
    UpperCamelCase__ : Dict = {int(k): v for k, v in idalabel.items()}
UpperCamelCase__ : Dict = idalabel
UpperCamelCase__ : List[Any] = {v: k for k, v in idalabel.items()}
UpperCamelCase__ : int = partial(lowerCamelCase_ , num_labels=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_)
UpperCamelCase__ : Tuple = {
'regnet-x-002': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type='x'),
'regnet-x-004': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type='x'),
'regnet-x-006': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type='x'),
'regnet-x-008': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type='x'),
'regnet-x-016': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type='x'),
'regnet-x-032': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1_008] , groups_width=48 , layer_type='x'),
'regnet-x-040': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1_360] , groups_width=40 , layer_type='x'),
'regnet-x-064': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1_624] , groups_width=56 , layer_type='x'),
'regnet-x-080': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1_920] , groups_width=120 , layer_type='x'),
'regnet-x-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112 , layer_type='x'),
'regnet-x-160': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2_048] , groups_width=128 , layer_type='x'),
'regnet-x-320': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1_344, 2_520] , groups_width=168 , layer_type='x'),
# y variant
'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8),
'regnet-y-004': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8),
'regnet-y-006': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16),
'regnet-y-008': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16),
'regnet-y-016': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24),
'regnet-y-032': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1_512] , groups_width=24),
'regnet-y-040': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1_088] , groups_width=64),
'regnet-y-064': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1_296] , groups_width=72),
'regnet-y-080': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2_016] , groups_width=56),
'regnet-y-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112),
'regnet-y-160': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1_232, 3_024] , groups_width=112),
'regnet-y-320': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232),
'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328),
'regnet-y-1280-seer': RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264),
'regnet-y-2560-seer': RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640),
'regnet-y-10b-seer': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010),
# finetuned on imagenet
'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232),
'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328),
'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264),
'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640),
'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
# add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location='cpu')
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files['classy_state_dict']['base_model']['model']
        state_dict = model_state_dict['trunk']
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]
    # pretrained
    names_to_from_model_map['regnet-y-320-seer'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf()),
    )
    names_to_from_model_map['regnet-y-640-seer'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf()),
    )
    names_to_from_model_map['regnet-y-1280-seer'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch', lambda: FakeRegNetVisslWrapper(RegNetYaaagf()),
    )
    names_to_from_model_map['regnet-y-10b-seer'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch', lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1_010, w_0=1_744, w_a=620.83, w_m=2.52))),
    )
    # IN1K finetuned
    names_to_from_model_map['regnet-y-320-seer-in1k'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf()),
    )
    names_to_from_model_map['regnet-y-640-seer-in1k'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf()),
    )
    names_to_from_model_map['regnet-y-1280-seer-in1k'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetYaaagf()),
    )
    names_to_from_model_map['regnet-y-10b-seer-in1k'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch', lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1_010, w_0=1_744, w_a=620.83, w_m=2.52))),
    )
    if model_name:
        convert_weight_and_push(
            model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], names_to_config[model_name], save_directory, push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], config, save_directory, push_to_hub,
            )
return config, expected_shape
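# Example invocation (a sketch; the script filename is an assumption, since this dump
# does not name the file):
#   python convert_regnet_to_pytorch.py --model_name regnet-y-040 --pytorch_dump_folder_path ./regnet-dump
# Omitting --model_name converts every architecture listed in `names_to_config`.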
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
            'The name of the model you wish to convert, it must be one of the supported regnet* architectures,'
            ' currently: regnetx-*, regnety-*. If `None`, all of them will be converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 596 | 1 |
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """Return the maximum sum of any k consecutive elements, via a sliding window."""
    if len(array) < k or k < 0:
        raise ValueError('Invalid Input')
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1_000, 1_000) for i in range(100)]
    k = randint(0, 110)
print(F'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
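    # Deterministic cross-check (a minimal sketch) against a brute-force scan,
    # independent of the random demo inputs above:
    sample = [1, 4, 2, 10, 23, 3, 1, 0, 20]
    window = 4
    assert max_sum_in_array(sample, window) == max(
        sum(sample[i : i + window]) for i in range(len(sample) - window + 1)
    )  # both give 39, from the window [4, 2, 10, 23]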
| 534 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=('DownBlock2D', 'AttnDownBlock2D'),
            up_block_types=('AttnUpBlock2D', 'UpBlock2D'),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type='numpy', generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type='numpy', generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_sde_ve_pipeline(self):
        model_id = 'google/ncsnpp-church-256'
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type='numpy', generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 534 | 1 |
import argparse
CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"
def update_custom_js(version: str) -> None:
    """Update the version table hard-coded in the docs' custom.js file."""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f"const stableVersion = \"v{version}\"\n"

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f"    \"v{version}\": \"v{version}\",\n"

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
update_custom_js(args.version)
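# For reference, a minimal sketch of the fragment this script expects to find in
# custom.js (the surrounding file content is an assumption; the two markers
# `const stableVersion =` and `const versionMapping = {` are exactly what the
# loops above search for):
#
#     const stableVersion = "v4.27.0"
#     const versionMapping = {
#         "main": "main",
#         "v4.27.0": "v4.27.0",
#     }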
| 461 |
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    """
    Check whether a string is a dot-decimal IPv4 address: exactly four numeric
    octets, each between 0 and 255 inclusive.
    """
    octets = [int(i) for i in ip_v4_address.split('.') if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = 'valid' if is_ip_v4_address_valid(ip) else 'invalid'
print(f'''{ip} is a {valid_or_invalid} IP v4 address.''')
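    # A few deterministic spot checks (a minimal sketch exercising the rules above):
    assert is_ip_v4_address_valid("192.168.0.23")
    assert not is_ip_v4_address_valid("192.256.15.8")  # octet above 255
    assert not is_ip_v4_address_valid("172.100.0.8.112")  # five octets
    assert not is_ip_v4_address_valid("1.2.33")  # only three octets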
| 461 | 1 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester:
    def __init__(self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9, is_training=True, use_attention_mask=True, use_labels=False, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
def lowerCAmelCase__ ( self ):
return TaConfig.from_pretrained("""google/umt5-base""" )
def lowerCAmelCase__ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , ):
if attention_mask is None:
a =input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
a =decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
a =torch.ones(config.num_hidden_layers , config.num_attention_heads , device=_lowerCAmelCase )
if decoder_head_mask is None:
a =torch.ones(config.num_decoder_layers , config.num_attention_heads , device=_lowerCAmelCase )
if cross_attn_head_mask is None:
a =torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=_lowerCAmelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def lowerCAmelCase__ ( self ):
a =ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
a =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
a =input_ids.clamp(self.pad_token_id + 1 )
a =decoder_input_ids.clamp(self.pad_token_id + 1 )
a =self.get_config()
a =config.num_attention_heads
a =self.prepare_inputs_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return config, input_dict
def lowerCAmelCase__ ( self ):
a , a =self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCAmelCase__ ( self ):
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowerCAmelCase__ ( self ):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowerCAmelCase__ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
a =UMTaModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
a =model(
input_ids=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase , attention_mask=_lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , )
a =model(input_ids=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase )
a =result.last_hidden_state
a =result.past_key_values
a =result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(_lowerCAmelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def lowerCAmelCase__ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
a =UMTaModel(config=_lowerCAmelCase ).get_decoder().to(_lowerCAmelCase ).eval()
# first forward pass
a =model(_lowerCAmelCase , use_cache=_lowerCAmelCase )
a =model(_lowerCAmelCase )
a =model(_lowerCAmelCase , use_cache=_lowerCAmelCase )
self.parent.assertTrue(len(_lowerCAmelCase ) == len(_lowerCAmelCase ) )
self.parent.assertTrue(len(_lowerCAmelCase ) == len(_lowerCAmelCase ) + 1 )
a , a =outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
a =ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
a =torch.cat([input_ids, next_tokens] , dim=-1 )
a =model(_lowerCAmelCase )["""last_hidden_state"""]
a =model(_lowerCAmelCase , past_key_values=_lowerCAmelCase )["""last_hidden_state"""]
# select random slice
a =ids_tensor((1,) , output_from_past.shape[-1] ).item()
a =output_from_no_past[:, -1, random_slice_idx].detach()
a =output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
def lowerCAmelCase__ ( self , _lowerCAmelCase , _lowerCAmelCase , ):
a =UMTaModel(config=_lowerCAmelCase ).to(_lowerCAmelCase ).half().eval()
a =model(**_lowerCAmelCase )["""last_hidden_state"""]
self.parent.assertFalse(torch.isnan(_lowerCAmelCase ).any().item() )
@require_torch
class UMTaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMTaForConditionalGeneration,
            "feature-extraction": UMTaModel,
            "summarization": UMTaForConditionalGeneration,
            "text2text-generation": UMTaForConditionalGeneration,
            "translation": UMTaForConditionalGeneration,
            "question-answering": UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp(self):
        self.model_tester = UMTaModelTester(self)
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
def lowerCAmelCase__ ( self ):
a =self.model_tester.prepare_config_and_inputs()
a =UMTaModel(config_and_inputs[0] ).to(_lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
_lowerCAmelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'''{tmpdirname}/t5_test.onnx''' , export_params=_lowerCAmelCase , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def lowerCAmelCase__ ( self ):
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*_lowerCAmelCase )
def lowerCAmelCase__ ( self ):
a =["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""]
a =self.model_tester.prepare_config_and_inputs()
a =config_and_inputs[0]
a =UMTaForConditionalGeneration(_lowerCAmelCase ).eval()
model.to(_lowerCAmelCase )
a ={
"""head_mask""": torch.zeros(config.num_layers , config.num_heads , device=_lowerCAmelCase ),
"""decoder_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=_lowerCAmelCase ),
"""cross_attn_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=_lowerCAmelCase ),
}
for attn_name, (name, mask) in zip(_lowerCAmelCase , head_masking.items() ):
a ={name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
a =torch.ones(
config.num_decoder_layers , config.num_heads , device=_lowerCAmelCase )
a =model.generate(
config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=_lowerCAmelCase , return_dict_in_generate=_lowerCAmelCase , **_lowerCAmelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
a =out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
def lowerCAmelCase__ ( self ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UMTaIntegrationTest(unittest.TestCase):
@slow
@unittest.skip(
"""Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" )
def lowerCAmelCase__ ( self ):
a =UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=_lowerCAmelCase ).to(_lowerCAmelCase )
a =AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=_lowerCAmelCase , legacy=_lowerCAmelCase )
a =[
"""Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
"""No se como puedo <extra_id_0>.""",
"""This is the reason why we <extra_id_0> them.""",
"""The <extra_id_0> walks in <extra_id_1>, seats""",
"""A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
]
a =tokenizer(_lowerCAmelCase , return_tensors="""pt""" , padding=_lowerCAmelCase ).input_ids
# fmt: off
a =torch.tensor(
[
                [38_530, 210_703, 256_299, 1_410, 256_298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25_922, 256_299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1_460, 339, 312, 19_014, 10_620, 758, 256_299, 2_355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256_299, 14_869, 281, 301, 256_298, 275, 119_983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256_299, 14_869, 281, 2_234, 289, 2_275, 333, 61_391, 289, 256_298, 543, 256_297, 168_714, 329, 256_296, 274, 1],
] )
# fmt: on
torch.testing.assert_allclose(_lowerCAmelCase , _lowerCAmelCase )
a =model.generate(input_ids.to(_lowerCAmelCase ) )
a =[
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
a =tokenizer.batch_decode(_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
| 321 |
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the area between `fnc` and the x-axis on [x_start, x_end]."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximate small segments of the curve as linear and solve
        # for the trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
if __name__ == "__main__":
    def f(x: float) -> float:
        return x**3 + x**2
print('''f(x) = x^3 + x^2''')
print('''The area between the curve, x = -5, x = 5 and the x axis is:''')
    i = 10
while i <= 100000:
print(f"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
i *= 10
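    # Sanity check (a minimal sketch): because each trapezoid uses |f(x1) + f(x2)|,
    # the routine approximates the unsigned area of |x^3 + x^2| over [-5, 5]. The
    # integrand is negative on [-5, -1] and positive on [-1, 5], giving 938/3 ≈ 312.667.
    assert abs(trapezoidal_area(f, -5, 5, 100_000) - 938 / 3) < 1e-2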
| 321 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict['input_ids']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        head_mask = inputs_dict['head_mask']
        self_attn_cache_num = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': TFMBartForConditionalGeneration,
            'feature-extraction': TFMBartModel,
            'summarization': TFMBartForConditionalGeneration,
            'text2text-generation': TFMBartForConditionalGeneration,
            'translation': TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False
    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        ' UN Chief Says There Is No Military Solution in Syria',
    ]
    expected_text = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
    ]
    model_name = 'facebook/mbart-large-en-ro'

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors='tf')
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
| 207 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'xlnet-base-cased': None,
    'xlnet-large-cased': None,
}

SPIECE_UNDERLINE = '▁'

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = 'left'
    slow_tokenizer_class = XLNetTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=False, bos_token='<s>', eos_token='</s>', unk_token='<unk>', sep_token='<sep>', pad_token='<pad>', cls_token='<cls>', mask_token='<mask>', additional_special_tokens=['<eop>', '<eod>'], **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs)
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
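    # Example of the layout produced above (a sketch): for a pair of sequences
    # (A, B), the token type ids are 0 for A and its trailing <sep>, 1 for B and
    # its trailing <sep>, and the final <cls> always gets segment id 2.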
    def save_vocabulary(self, save_directory: str, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.'
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
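# Example usage (a minimal sketch; 'xlnet-base-cased' is one of the checkpoints
# mapped above):
# tok = XLNetTokenizerFast.from_pretrained('xlnet-base-cased')
# enc = tok('Hello world')  # note that XLNet pads on the left (padding_side = 'left')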
| 207 | 1 |
'''simple docstring'''
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {'dtype': torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {'dtype': torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, '__array__') and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
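# Example usage (a minimal sketch; the inline pyarrow table stands in for what the
# `datasets` library would normally hand the formatter, and no Features are configured):
# import pyarrow as pa
# formatter = TorchFormatter()
# table = pa.table({'x': [[1.0, 2.0], [3.0, 4.0]], 'y': [0, 1]})
# batch = formatter.format_batch(table)  # {'x': float tensor (2, 2), 'y': int64 tensor (2,)}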
| 709 |
'''simple docstring'''
import numpy as np
SQUARE = [
["a", "b", "c", "d", "e"],
["f", "g", "h", "i", "k"],
["l", "m", "n", "o", "p"],
["q", "r", "s", "t", "u"],
["v", "w", "x", "y", "z"],
]
class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-based (row, column) indexes of `letter` in the square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at the 1-based (row, column) position in the square."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(' ', '')
        message = message.replace('j', 'i')

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ''
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(' ', '')

        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ''
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
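if __name__ == "__main__":
    # Quick round-trip check (a minimal sketch): encode() folds 'j' into 'i' and
    # drops spaces, so any j-free, space-free message decodes back verbatim.
    cipher = BifidCipher()
    assert cipher.decode(cipher.encode("testmessage")) == "testmessage"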
| 528 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    """Abstract base class all CLI subcommands derive from."""

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
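# A concrete subcommand might look like this (a hypothetical sketch, not part of
# the original file):
#
# class EnvCommand(BaseTransformersCLICommand):
#     @staticmethod
#     def register_subcommand(parser):
#         # `parser` is assumed to be the subparsers action of the root ArgumentParser
#         env_parser = parser.add_parser('env')
#         env_parser.set_defaults(func=lambda args: EnvCommand())
#
#     def run(self):
#         print('environment info...')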
| 38 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers the events that go through."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append('on_init_end')

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append('on_train_begin')

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append('on_train_end')

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append('on_epoch_begin')

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append('on_epoch_end')

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append('on_step_begin')

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append('on_step_end')

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append('on_evaluate')

    def on_predict(self, args, state, control, **kwargs):
        self.events.append('on_predict')

    def on_save(self, args, state, control, **kwargs):
        self.events.append('on_save')

    def on_log(self, args, state, control, **kwargs):
        self.events.append('on_log')

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append('on_prediction_step')
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)
    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # it's set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks)
    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))
        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer):
        expected_events = ['on_init_end', 'on_train_begin']
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ['on_prediction_step'] * len(trainer.get_eval_dataloader()) + ['on_log', 'on_evaluate']
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append('on_epoch_begin')
            for _ in range(train_dl_len):
                step += 1
                expected_events += ['on_step_begin', 'on_step_end']
                if step % trainer.args.logging_steps == 0:
                    expected_events.append('on_log')
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append('on_save')
            expected_events.append('on_epoch_end')
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ['on_log', 'on_train_end']
        return expected_events
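    # For two optimization steps in one epoch under the default logging/save/eval
    # strategies, the sequence above expands to (a sketch):
    #   on_init_end, on_train_begin, on_epoch_begin,
    #   on_step_begin, on_step_end, on_step_begin, on_step_end,
    #   on_epoch_end, on_log, on_train_end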
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action='ignore', category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy='steps')
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy='epoch')
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback], logging_steps=3, save_steps=10, eval_steps=5, evaluation_strategy='steps', )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch('transformers.trainer_callback.logger.warning') as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback], )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
| 38 | 1 |
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
model_id = 'path-to-your-trained-model'
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to('cuda')

prompt = 'A photo of sks dog in a bucket'
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('''dog-bucket.png''') | 708 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class FFT:
    """Fast polynomial multiplication via the radix-2 FFT (coefficients little-endian)."""

    def __init__(self, poly_a=None, poly_b=None):
        # Input as coefficient lists
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        dft_a = self.__dft('A')
        dft_b = self.__dft('B')
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    def __str__(self):
        a = 'A = ' + ' + '.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = 'B = ' + ' + '.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = 'A*B = ' + ' + '.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.product)
        )
        return f'{a}\n{b}\n{c}'
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod() | 338 | 0 |
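# Usage sketch (not in the original file): multiply (1 + 2x + 3x^2) by (4 + 5x).
# The coefficient lists are illustrative inputs, listed lowest degree first.
#
#   fft = FFT(poly_a=[1, 2, 3], poly_b=[4, 5])
#   print(fft.product)  # coefficients of 4 + 13x + 22x^2 + 15x^3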
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
)

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250004
RO_CODE = 250020


@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                #                                          ^ unk: 2 + 1 = 3                      unk: 2 + 1 = 3 ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)


@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
"""
Utility that checks (and optionally fixes) the order of the `_import_structure`
entries in the `__init__.py` files of `src/diffusers`.
"""

import argparse
import os
import re


PATH_TO_DIFFUSERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Return the indentation prefix of `line` (empty string if none)."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """
    Split `code` into its indented blocks, starting at `indent_level`. If provided,
    begins splitting after `start_prompt` and stops at `end_prompt` (both returned
    as their own blocks, so joining the result gives back `code`).
    """
    # Let's split the code into lines and move to start_index.
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wrap a key function so that sorting lower-cases and ignores underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort; `key` maps an object to a str."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """Return the same `import_statement` but with the imported objects sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    """Sort `_import_structure` imports in `file`; `check_only` only reports, else overwrites."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                # NOTE: the original overwrote `failures` here; appending is assumed
                # to be the intent so that every offending init is reported.
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
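# Usage sketch (assumes the script is saved as utils/custom_init_isort.py, as
# in the upstream repos; the filename is not visible in this excerpt):
#
#   python utils/custom_init_isort.py --check_only   # report unsorted inits
#   python utils/custom_init_isort.py                # rewrite inits in place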
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available


if is_sentencepiece_available():
    from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"],
            padding=True,
            truncation=True,
            return_tensors=FRAMEWORK,
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_lowercase ={'input_ids': [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        # The big `_lowercase` literal above is kept verbatim; alias it here.
        expected_encoding = _lowercase

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )

    def test_tokenizer_decode(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
import flax.linen as nn
import jax.numpy as jnp

from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D


class FlaxCrossAttnDownBlock2D(nn.Module):
    """2D downsampling block with cross-attention, as used in the Flax UNet."""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    """Plain 2D downsampling block (resnets only, no attention)."""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    """2D upsampling block with cross-attention; consumes skip connections."""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUpBlock2D(nn.Module):
    """Plain 2D upsampling block (resnets only); consumes skip connections."""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    """Middle block of the UNet: alternating resnets and cross-attention."""

    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
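# Shape sketch (an assumption, not from the original file): these Flax blocks
# take NHWC tensors, so a down block maps (B, H, W, C_in) -> (B, H/2, W/2, C_out)
# when add_downsample is True. A minimal initialization could look like:
#
#   import jax
#   block = FlaxDownBlock2D(in_channels=32, out_channels=64)
#   params = block.init(
#       jax.random.PRNGKey(0),
#       jnp.ones((1, 16, 16, 32)),  # hidden_states
#       jnp.ones((1, 128)),         # time embedding
#   )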
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    """
    Return True if `ip_v4_address` consists of four dot-separated decimal
    octets, each between 0 and 255.
    """
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    # NOTE: the original upper bound was 254; 255 is the correct maximum
    # value of an IPv4 octet.
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
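# Quick checks (sketch, not in the original file):
#
#   is_ip_v4_address_valid("192.168.0.23")   -> True
#   is_ip_v4_address_valid("192.256.15.8")   -> False  (256 is out of range)
#   is_ip_v4_address_valid("not.an.ip.addr") -> False  (non-digit octets)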
from math import pow


def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple:
    """Count ways to write `needed_sum` as a sum of distinct `power`-th powers."""
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1_000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )

    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
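# Example (sketch, not in the original file): 100 as a sum of distinct squares
# has 3 solutions: 10^2, 6^2 + 8^2, and 1^2 + 3^2 + 4^2 + 5^2 + 7^2.
#
#   assert solve(100, 2) == 3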
import inspect
import unittest

import numpy as np

from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor


if is_flax_available():
    import jax

    from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel


class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
from typing import Dict

from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
    TestCasePlus,
    execute_subprocess_async,
    get_torch_dist_unique_port,
    require_torch_multi_gpu,
    require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging


logger = logging.get_logger(__name__)


if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    from transformers import Trainer

    class DummyDataset(Dataset):
        def __init__(self, length: int = 101):
            self.length = length

        def __len__(self):
            return self.length

        def __getitem__(self, i) -> int:
            return i

    class DummyDataCollator:
        def __call__(self, features):
            return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}

    class DummyModel(nn.Module):
        def __init__(self):
            super().__init__()
            # Add some (unused) params otherwise DDP will complain.
            self.fc = nn.Linear(120, 80)

        def forward(self, input_ids, labels=None):
            if labels is not None:
                return torch.tensor(0.0, device=input_ids.device), input_ids
            else:
                return input_ids


class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


if __name__ == "__main__":
    # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
    #
    # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py

    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
"""
Banker's algorithm: a resource-allocation and deadlock-avoidance algorithm that
simulates granting each process its maximum claim and checks whether the system
stays in a safe state before allowing an allocation to proceed.
"""

from __future__ import annotations

import time

import numpy as np

test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]


class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        """
        :param claim_vector: total amount of each resource available
        :param allocated_resources_table: amount of each resource each process holds
        :param maximum_claim_table: maximum claim of each process on each resource
        """
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sum the allocations per resource across all processes."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Resources still available: claim vector minus current allocations."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        """Per-process remaining need: maximum claim minus current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict:
        """Track original process indices, since `need_list` shrinks during `main`."""
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs) -> None:
        """Simulate the Banker's algorithm, printing the execution order."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        """Align and display the algorithm's input tables."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
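# Example run (sketch, not part of the original file), using the test tables
# defined at the top of this module; any truthy keyword (here `describe`)
# triggers the pretty-printed tables before the simulation:
#
#   BankersAlgorithm(
#       test_claim_vector, test_allocated_res_table, test_maximum_claim_table
#   ).main(describe=True)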
import unittest

import numpy as np

from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    load_numpy,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)


if is_onnx_available():
    import onnxruntime as ort


@nightly
@require_onnxruntime
@require_torch_gpu
class StableDiffusionOnnxInpaintLegacyPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase__ = {
'''configuration_vision_text_dual_encoder''': ['''VisionTextDualEncoderConfig'''],
'''processing_vision_text_dual_encoder''': ['''VisionTextDualEncoderProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
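The `_import_structure` dict and the final `sys.modules` swap implement deferred imports: heavy framework submodules load only when one of the exported names is first accessed. A minimal sketch of that idea (illustrative, not transformers' actual `_LazyModule` implementation):

# Sketch of a lazy module: attribute access triggers the real import, once.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map exported attribute -> submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f".{self._attr_to_module[attr]}", self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so the import runs only once
        return value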
| 710 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase_ ( __a , __a , __a , unittest.TestCase ):
lowerCAmelCase__ = AltDiffusionPipeline
lowerCAmelCase__ = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowercase_ ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase__ : int = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
UpperCAmelCase__ : Tuple = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_one=_A , )
torch.manual_seed(0 )
UpperCAmelCase__ : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
UpperCAmelCase__ : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_002 , )
UpperCAmelCase__ : int = CLIPTextModel(_A )
UpperCAmelCase__ : str = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
UpperCAmelCase__ : Dict = 77
UpperCAmelCase__ : str = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowercase_ ( self : Tuple , _A : List[Any] , _A : Dict=0 ):
'''simple docstring'''
if str(_A ).startswith('''mps''' ):
UpperCAmelCase__ : Optional[Any] = torch.manual_seed(_A )
else:
UpperCAmelCase__ : Union[str, Any] = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase__ : str = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def lowercase_ ( self : str ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ : Optional[int] = self.get_dummy_components()
torch.manual_seed(0 )
UpperCAmelCase__ : int = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , )
# TODO: remove after fixing the non-deterministic text encoder
UpperCAmelCase__ : Dict = RobertaSeriesModelWithTransformation(_A )
UpperCAmelCase__ : str = text_encoder
UpperCAmelCase__ : Optional[Any] = AltDiffusionPipeline(**_A )
UpperCAmelCase__ : Any = alt_pipe.to(_A )
alt_pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase__ : Tuple = self.get_dummy_inputs(_A )
UpperCAmelCase__ : int = '''A photo of an astronaut'''
UpperCAmelCase__ : Dict = alt_pipe(**_A )
UpperCAmelCase__ : Optional[int] = output.images
UpperCAmelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase__ : Union[str, Any] = np.array(
[0.5_7_4_8_1_6_2, 0.6_0_4_4_7_1_4_5, 0.4_8_8_2_1_2_1_7, 0.5_0_1_0_0_6_3_6, 0.5_4_3_1_1_8_5, 0.4_5_7_6_3_6_8_3, 0.4_9_6_5_7_6_9_6, 0.4_8_1_3_2_7_3_3, 0.4_7_5_7_3_0_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ : Optional[Any] = self.get_dummy_components()
UpperCAmelCase__ : str = PNDMScheduler(skip_prk_steps=_A )
torch.manual_seed(0 )
UpperCAmelCase__ : str = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , )
# TODO: remove after fixing the non-deterministic text encoder
UpperCAmelCase__ : Dict = RobertaSeriesModelWithTransformation(_A )
UpperCAmelCase__ : Any = text_encoder
UpperCAmelCase__ : Optional[Any] = AltDiffusionPipeline(**_A )
UpperCAmelCase__ : Tuple = alt_pipe.to(_A )
alt_pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase__ : Any = self.get_dummy_inputs(_A )
UpperCAmelCase__ : Dict = alt_pipe(**_A )
UpperCAmelCase__ : int = output.images
UpperCAmelCase__ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase__ : Optional[int] = np.array(
[0.5_1_6_0_5_0_9_3, 0.5_7_0_7_2_4_1, 0.4_7_3_6_5_5_0_7, 0.5_0_5_7_8_8_8_6, 0.5_6_3_3_8_7_7, 0.4_6_4_2_5_0_3, 0.5_1_8_2_0_8_1, 0.4_8_7_6_3_4_8_4, 0.4_9_0_8_4_2_3_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : str = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=_A )
UpperCAmelCase__ : Dict = alt_pipe.to(_A )
alt_pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase__ : Optional[Any] = '''A painting of a squirrel eating a burger'''
UpperCAmelCase__ : Tuple = torch.manual_seed(0 )
UpperCAmelCase__ : Any = alt_pipe([prompt] , generator=_A , guidance_scale=6.0 , num_inference_steps=20 , output_type='''np''' )
UpperCAmelCase__ : int = output.images
UpperCAmelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase__ : Any = np.array([0.1_0_1_0, 0.0_8_0_0, 0.0_7_9_4, 0.0_8_8_5, 0.0_8_4_3, 0.0_7_6_2, 0.0_7_6_9, 0.0_7_2_9, 0.0_5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : str = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
UpperCAmelCase__ : Union[str, Any] = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=_A , safety_checker=_A )
UpperCAmelCase__ : List[Any] = alt_pipe.to(_A )
alt_pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase__ : List[Any] = '''A painting of a squirrel eating a burger'''
UpperCAmelCase__ : List[str] = torch.manual_seed(0 )
UpperCAmelCase__ : Tuple = alt_pipe([prompt] , generator=_A , num_inference_steps=2 , output_type='''numpy''' )
UpperCAmelCase__ : List[str] = output.images
UpperCAmelCase__ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase__ : Optional[int] = np.array([0.4_0_1_9, 0.4_0_5_2, 0.3_8_1_0, 0.4_1_1_9, 0.3_9_1_6, 0.3_9_8_2, 0.4_6_5_1, 0.4_1_9_5, 0.5_3_2_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 312 | 0 |
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
'''simple docstring'''
if inductance <= 0:
raise ValueError('Inductance cannot be 0 or negative' )
elif capacitance <= 0:
raise ValueError('Capacitance cannot be 0 or negative' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
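A quick numeric check of the formula f = 1 / (2 * pi * sqrt(L * C)), using assumed values L = 10 mH and C = 100 nF:

# sqrt(L * C) = sqrt(1e-9) ~= 3.162e-5, so f ~= 1 / (2 * pi * 3.162e-5) ~= 5032.9 Hz
label, frequency = resonant_frequency(10e-3, 100e-9)
print(label, round(frequency, 1))  # Resonant frequency 5032.9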
| 313 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
def A ( self : Optional[int] ) -> Optional[int]:
lowercase_ : Dict = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
], # cummulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
], # cummulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
lowercase_ : List[str] = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
lowercase_ : int = tf.convert_to_tensor(
[8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non filtered values as noted above
lowercase_ : Union[str, Any] = tf_top_k_top_p_filtering(A , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
lowercase_ : str = output[output != -float('''inf''' )]
lowercase_ : int = tf.cast(
tf.where(tf.not_equal(A , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(A , A , rtol=1e-12 )
tf.debugging.assert_equal(A , A )
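The fixture above pins expected indices and values for `tf_top_k_top_p_filtering`. The underlying idea fits in a few lines of NumPy (a sketch of the technique for a 1-D logits vector, not the transformers implementation under test; `min_tokens_to_keep` is omitted):

# Combined top-k / nucleus (top-p) filtering: mask filtered logits to -inf.
import numpy as np

def top_k_top_p_filter(logits, top_k=0, top_p=1.0, filter_value=-np.inf):
    logits = logits.copy()
    if top_k > 0:
        kth = np.sort(logits)[-top_k]          # k-th largest logit
        logits[logits < kth] = filter_value    # keep only the top_k largest
    if top_p < 1.0:
        order = np.argsort(logits)[::-1]       # descending by logit
        probs = np.exp(logits[order] - logits[order].max())
        probs /= probs.sum()
        cum = np.cumsum(probs)
        cutoff = np.searchsorted(cum, top_p) + 1  # smallest prefix with mass >= top_p
        logits[order[cutoff:]] = filter_value
    return logits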
@require_tf
class _UpperCAmelCase ( unittest.TestCase , _A ):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
SCREAMING_SNAKE_CASE_ : Tuple = {
"AutoModelForCausalLM": TFAutoModelForCausalLM,
"AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq,
"AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM,
"AutoModelForVision2Seq": TFAutoModelForVisionaSeq,
"LogitsProcessorList": TFLogitsProcessorList,
"MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
"create_tensor_fn": tf.convert_to_tensor,
"floats_tensor": floats_tensor,
"return_tensors": "tf",
}
@slow
def A ( self : Any ) -> int:
# TF-only test: tf.saved_model export
lowercase_ : int = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowercase_ : Tuple = 2
lowercase_ : Optional[Any] = 2
class _UpperCAmelCase ( tf.Module ):
def __init__( self : Optional[int] , A : int ) -> List[Any]:
super(A , self ).__init__()
lowercase_ : Tuple = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=A , )
def A ( self : Optional[int] , A : str , A : List[Any] ) -> Any:
lowercase_ : Optional[Any] = self.model.generate(
input_ids=A , attention_mask=A , max_new_tokens=A , return_dict_in_generate=A , )
return {"sequences": outputs["sequences"]}
lowercase_ : Tuple = [[2, 0], [1_02, 1_03]]
lowercase_ : int = [[1, 0], [1, 1]]
lowercase_ : List[str] = DummyModel(model=A )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(A , A , signatures={'''serving_default''': dummy_model.serving} )
lowercase_ : Optional[int] = tf.saved_model.load(A ).signatures['''serving_default''']
for batch_size in range(1 , len(A ) + 1 ):
lowercase_ : Union[str, Any] = {
'''input_ids''': tf.constant(dummy_input_ids[:batch_size] ),
'''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ),
}
lowercase_ : Dict = serving_func(**A )['''sequences''']
lowercase_ : Tuple = test_model.generate(**A , max_new_tokens=A )
tf.debugging.assert_equal(A , A )
@slow
def A ( self : Tuple ) -> List[str]:
# TF-only test: tf.saved_model export
lowercase_ : Optional[Any] = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowercase_ : List[Any] = 1
lowercase_ : Dict = 2
class _UpperCAmelCase ( tf.Module ):
def __init__( self : Optional[int] , A : Any ) -> int:
super(A , self ).__init__()
lowercase_ : Optional[int] = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=A , )
def A ( self : Any , A : List[Any] , A : Dict ) -> Tuple:
lowercase_ : int = self.model.generate(
input_ids=A , attention_mask=A , max_new_tokens=A , return_dict_in_generate=A , )
return {"sequences": outputs["sequences"]}
lowercase_ : int = [[2], [1_02, 1_03]]
lowercase_ : List[Any] = [[1], [1, 1]]
lowercase_ : List[Any] = DummyModel(model=A )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(A , A , signatures={'''serving_default''': dummy_model.serving} )
lowercase_ : List[Any] = tf.saved_model.load(A ).signatures['''serving_default''']
for input_row in range(len(A ) ):
lowercase_ : Tuple = {
'''input_ids''': tf.constant([dummy_input_ids[input_row]] ),
'''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ),
}
lowercase_ : Optional[Any] = serving_func(**A )['''sequences''']
lowercase_ : str = test_model.generate(**A , max_new_tokens=A )
tf.debugging.assert_equal(A , A )
@slow
@require_tensorflow_text
def A ( self : Union[str, Any] ) -> str:
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=A )
class _UpperCAmelCase ( tf.keras.layers.Layer ):
def __init__( self : Optional[int] ) -> Optional[Any]:
super().__init__()
lowercase_ : Any = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(A , '''spiece.model''' ) , '''rb''' ).read() )
lowercase_ : Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
def A ( self : Tuple , A : Dict , *A : Union[str, Any] , **A : Optional[Any] ) -> Dict:
lowercase_ : List[Any] = self.tokenizer.tokenize(A )
lowercase_ , lowercase_ : List[Any] = text.pad_model_inputs(
A , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
lowercase_ : Optional[int] = self.model.generate(input_ids=A , attention_mask=A )
return self.tokenizer.detokenize(A )
lowercase_ : Tuple = CompleteSentenceTransformer()
lowercase_ : Optional[Any] = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' )
lowercase_ : int = complete_model(A )
lowercase_ : List[Any] = tf.keras.Model(A , A )
keras_model.save(A )
def A ( self : int ) -> Optional[Any]:
# Has PT equivalent: this test relies on random sampling
lowercase_ : Union[str, Any] = {
'''do_sample''': True,
'''num_beams''': 1,
'''top_p''': 0.7,
'''top_k''': 10,
'''temperature''': 0.7,
}
lowercase_ : Any = 14
lowercase_ : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowercase_ : Optional[int] = '''Hello, my dog is cute and'''
lowercase_ : Dict = tokenizer(A , return_tensors='''tf''' )
lowercase_ : List[str] = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowercase_ : Any = 6_38
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
lowercase_ : Union[str, Any] = model.generate(**A , eos_token_id=A , **A )
self.assertTrue(expectation == len(generated_tokens[0] ) )
lowercase_ : int = [6_38, 1_98]
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
lowercase_ : Any = model.generate(**A , eos_token_id=A , **A )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def A ( self : Dict ) -> Optional[int]:
# Has PT equivalent: ample use of framework-specific code
lowercase_ : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
lowercase_ : int = '''Hugging Face is a technology company based in New York and Paris.'''
lowercase_ : List[str] = bart_tokenizer(A , return_tensors='''tf''' ).input_ids
lowercase_ : Tuple = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
lowercase_ : Optional[int] = bart_model.generate(A ).numpy()
class _UpperCAmelCase ( _A ):
def A ( self : List[Any] , A : List[str] , A : Optional[Any]=None , **A : Any ) -> Any:
return super().call(A , **A )
lowercase_ : Union[str, Any] = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
lowercase_ : List[Any] = bart_model.generate(A , foo='''bar''' ).numpy()
self.assertTrue(np.array_equal(A , A ) )
class _UpperCAmelCase ( bart_model.model.encoder.__class__ ):
def A ( self : int , A : List[str] , **A : int ) -> Tuple:
return super().call(A , **A )
lowercase_ : Dict = FakeEncoder(bart_model.config , bart_model.model.shared )
lowercase_ : Optional[Any] = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
lowercase_ : int = bart_model.generate(A ).numpy()
with self.assertRaises(A ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(A , foo='''bar''' )
| 231 | 0 |
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class snake_case (tf.keras.layers.Layer ):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order
        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight")
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias")
        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}")
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight")
                bias = self.add_weight(
                    shape=(self.vocab_size,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias")
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}")
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight")
                bias = self.add_weight(
                    shape=(r_idx - l_idx,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias")
                self.out_layers.append((weight, bias))
        super().build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx
                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]
                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)
                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)
        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")
        return out
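The geometry of the adaptive softmax above is easiest to see with small numbers: the cutoffs split the vocabulary into a frequent head plus tail clusters, and the head output also carries one logit per cluster. A worked example with assumed sizes:

# With an assumed vocab_size of 10 and cutoffs [4, 7], tokens 0-3 form the head
# and the two tail clusters hold tokens 4-6 and 7-9 (mirrors the layer's __init__).
vocab_size, cutoffs = 10, [4, 7]
cutoff_ends = [0] + cutoffs + [vocab_size]  # [0, 4, 7, 10]
clusters = [range(cutoff_ends[i], cutoff_ends[i + 1]) for i in range(len(cutoff_ends) - 1)]
n_clusters = len(cutoffs)                   # 2 tail clusters
head_size = cutoffs[0] + n_clusters         # 4 head tokens + 2 cluster logits = 6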
| 539 |
'''simple docstring'''
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class snake_case :
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=100 ,UpperCAmelCase_=13 ,UpperCAmelCase_=30 ,UpperCAmelCase_=2 ,UpperCAmelCase_=3 ,UpperCAmelCase_=True ,UpperCAmelCase_=True ,UpperCAmelCase_=32 ,UpperCAmelCase_=4 ,UpperCAmelCase_=4 ,UpperCAmelCase_=37 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=10 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=3 ,UpperCAmelCase_=None ,UpperCAmelCase_=[0, 1, 2, 3] ,) -> Any:
lowercase__ = parent
lowercase__ = 100
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = scope
lowercase__ = out_indices
lowercase__ = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase__ = (image_size // patch_size) ** 2
lowercase__ = num_patches + 1
def _a ( self ) -> str:
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase__ = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
lowercase__ = self.get_config()
return config, pixel_values, labels, pixel_labels
def _a ( self ) -> List[str]:
return BeitConfig(
vocab_size=self.vocab_size ,image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=UpperCAmelCase_ ,initializer_range=self.initializer_range ,out_indices=self.out_indices ,)
def _a ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) -> Optional[int]:
lowercase__ = BeitModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
lowercase__ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) -> Tuple:
lowercase__ = BeitForMaskedImageModeling(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
lowercase__ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length - 1, self.vocab_size) )
def _a ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) -> Dict:
lowercase__ = self.type_sequence_label_size
lowercase__ = BeitForImageClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
lowercase__ = model(UpperCAmelCase_ ,labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase__ = 1
lowercase__ = BeitForImageClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(UpperCAmelCase_ ,labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _a ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) -> Dict:
lowercase__ = self.num_labels
lowercase__ = BeitForSemanticSegmentation(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
lowercase__ = model(UpperCAmelCase_ )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
lowercase__ = model(UpperCAmelCase_ ,labels=UpperCAmelCase_ )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def _a ( self ) -> str:
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case (UpperCamelCase , UpperCamelCase , unittest.TestCase ):
lowerCAmelCase__ :Optional[Any] = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCAmelCase__ :Optional[int] = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ :Any = False
lowerCAmelCase__ :Optional[Any] = False
lowerCAmelCase__ :Optional[Any] = False
def _a ( self ) -> Union[str, Any]:
lowercase__ = BeitModelTester(self )
lowercase__ = ConfigTester(self ,config_class=UpperCAmelCase_ ,has_text_modality=UpperCAmelCase_ ,hidden_size=37 )
def _a ( self ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def _a ( self ) -> Dict:
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def _a ( self ) -> int:
pass
def _a ( self ) -> Any:
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCAmelCase_ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ ,nn.Linear ) )
def _a ( self ) -> Tuple:
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCAmelCase_ )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,UpperCAmelCase_ )
def _a ( self ) -> Tuple:
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def _a ( self ) -> List[Any]:
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )
def _a ( self ) -> Any:
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ )
def _a ( self ) -> str:
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCAmelCase_ )
def _a ( self ) -> int:
if not self.model_tester.is_training:
return
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(UpperCAmelCase_ ), BeitForMaskedImageModeling]:
continue
lowercase__ = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.train()
lowercase__ = self._prepare_for_class(UpperCAmelCase_ ,UpperCAmelCase_ ,return_labels=UpperCAmelCase_ )
lowercase__ = model(**UpperCAmelCase_ ).loss
loss.backward()
def _a ( self ) -> str:
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase__ = False
lowercase__ = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(UpperCAmelCase_ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
lowercase__ = model_class(UpperCAmelCase_ )
model.gradient_checkpointing_enable()
model.to(UpperCAmelCase_ )
model.train()
lowercase__ = self._prepare_for_class(UpperCAmelCase_ ,UpperCAmelCase_ ,return_labels=UpperCAmelCase_ )
lowercase__ = model(**UpperCAmelCase_ ).loss
loss.backward()
def _a ( self ) -> int:
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = _config_zero_init(UpperCAmelCase_ )
for model_class in self.all_model_classes:
lowercase__ = model_class(config=UpperCAmelCase_ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() ,[0.0, 1.0] ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,)
@slow
def _a ( self ) -> Optional[Any]:
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = BeitModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
def lowerCamelCase ( ):
'''simple docstring'''
lowercase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class snake_case (unittest.TestCase ):
@cached_property
def _a ( self ) -> Any:
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def _a ( self ) -> int:
lowercase__ = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(UpperCAmelCase_ )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=UpperCAmelCase_ ,return_tensors="pt" ).pixel_values.to(UpperCAmelCase_ )
# prepare bool_masked_pos
lowercase__ = torch.ones((1, 196) ,dtype=torch.bool ).to(UpperCAmelCase_ )
# forward pass
with torch.no_grad():
lowercase__ = model(pixel_values=UpperCAmelCase_ ,bool_masked_pos=UpperCAmelCase_ )
lowercase__ = outputs.logits
# verify the logits
lowercase__ = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape ,UpperCAmelCase_ )
lowercase__ = torch.tensor(
[[-3.24_37, 0.50_72, -13.91_74], [-3.24_56, 0.49_48, -13.94_01], [-3.20_33, 0.51_21, -13.85_50]] ).to(UpperCAmelCase_ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] ,UpperCAmelCase_ ,atol=1E-2 ) )
@slow
def _a ( self ) -> Optional[Any]:
lowercase__ = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(UpperCAmelCase_ )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=UpperCAmelCase_ ,return_tensors="pt" ).to(UpperCAmelCase_ )
# forward pass
with torch.no_grad():
lowercase__ = model(**UpperCAmelCase_ )
lowercase__ = outputs.logits
# verify the logits
lowercase__ = torch.Size((1, 1_000) )
self.assertEqual(logits.shape ,UpperCAmelCase_ )
lowercase__ = torch.tensor([-1.23_85, -1.09_87, -1.01_08] ).to(UpperCAmelCase_ )
self.assertTrue(torch.allclose(logits[0, :3] ,UpperCAmelCase_ ,atol=1E-4 ) )
lowercase__ = 281
self.assertEqual(logits.argmax(-1 ).item() ,UpperCAmelCase_ )
@slow
def _a ( self ) -> Union[str, Any]:
lowercase__ = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
UpperCAmelCase_ )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=UpperCAmelCase_ ,return_tensors="pt" ).to(UpperCAmelCase_ )
# forward pass
with torch.no_grad():
lowercase__ = model(**UpperCAmelCase_ )
lowercase__ = outputs.logits
# verify the logits
lowercase__ = torch.Size((1, 21_841) )
self.assertEqual(logits.shape ,UpperCAmelCase_ )
lowercase__ = torch.tensor([1.68_81, -0.27_87, 0.59_01] ).to(UpperCAmelCase_ )
self.assertTrue(torch.allclose(logits[0, :3] ,UpperCAmelCase_ ,atol=1E-4 ) )
lowercase__ = 2_396
self.assertEqual(logits.argmax(-1 ).item() ,UpperCAmelCase_ )
@slow
def _a ( self ) -> Union[str, Any]:
lowercase__ = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
lowercase__ = model.to(UpperCAmelCase_ )
lowercase__ = BeitImageProcessor(do_resize=UpperCAmelCase_ ,size=640 ,do_center_crop=UpperCAmelCase_ )
lowercase__ = load_dataset("hf-internal-testing/fixtures_ade20k" ,split="test" )
lowercase__ = Image.open(ds[0]["file"] )
lowercase__ = image_processor(images=UpperCAmelCase_ ,return_tensors="pt" ).to(UpperCAmelCase_ )
# forward pass
with torch.no_grad():
lowercase__ = model(**UpperCAmelCase_ )
lowercase__ = outputs.logits
# verify the logits
lowercase__ = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape ,UpperCAmelCase_ )
lowercase__ = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
if is_pillow_less_than_a:
lowercase__ = torch.tensor(
[
[[-4.92_25, -2.39_54, -3.05_22], [-2.88_22, -1.00_46, -1.75_61], [-2.95_49, -1.32_28, -2.13_47]],
[[-5.81_68, -3.41_29, -4.07_78], [-3.86_51, -2.22_14, -3.02_77], [-3.83_56, -2.46_43, -3.35_35]],
[[-0.00_78, 3.99_52, 4.07_54], [2.98_56, 4.69_44, 5.00_35], [3.24_13, 4.78_13, 4.99_69]],
] ,device=UpperCAmelCase_ ,)
else:
lowercase__ = torch.tensor(
[
[[-4.89_60, -2.36_88, -3.03_55], [-2.84_78, -0.98_36, -1.74_18], [-2.94_49, -1.33_32, -2.14_56]],
[[-5.80_81, -3.41_24, -4.10_06], [-3.85_61, -2.20_81, -3.03_23], [-3.83_65, -2.46_01, -3.36_69]],
[[-0.03_09, 3.98_68, 4.05_40], [2.96_40, 4.68_77, 4.99_76], [3.20_81, 4.76_90, 4.99_42]],
] ,device=UpperCAmelCase_ ,)
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,UpperCAmelCase_ ,atol=1E-4 ) )
@slow
def _a ( self ) -> str:
lowercase__ = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
lowercase__ = model.to(UpperCAmelCase_ )
lowercase__ = BeitImageProcessor(do_resize=UpperCAmelCase_ ,size=640 ,do_center_crop=UpperCAmelCase_ )
lowercase__ = load_dataset("hf-internal-testing/fixtures_ade20k" ,split="test" )
lowercase__ = Image.open(ds[0]["file"] )
lowercase__ = image_processor(images=UpperCAmelCase_ ,return_tensors="pt" ).to(UpperCAmelCase_ )
# forward pass
with torch.no_grad():
lowercase__ = model(**UpperCAmelCase_ )
lowercase__ = outputs.logits.detach().cpu()
lowercase__ = image_processor.post_process_semantic_segmentation(outputs=UpperCAmelCase_ ,target_sizes=[(500, 300)] )
lowercase__ = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape ,UpperCAmelCase_ )
lowercase__ = image_processor.post_process_semantic_segmentation(outputs=UpperCAmelCase_ )
lowercase__ = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape ,UpperCAmelCase_ )
| 539 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
'''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PegasusXForConditionalGeneration''',
'''PegasusXModel''',
'''PegasusXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 443 |
'''simple docstring'''
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size):
        '''simple docstring'''
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx):
        '''simple docstring'''
        return idx * 2

    def right(self, idx):
        '''simple docstring'''
        return idx * 2 + 1

    def build(self, idx, left_element, right_element, a):
        '''simple docstring'''
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])

    def update(self, idx, left_element, right_element, a, b, val):
        '''simple docstring'''
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True

    def query(self, idx, left_element, right_element, a, b):
        '''simple docstring'''
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self):
        '''simple docstring'''
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt) | 8 | 0 |
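The printed queries above can be sanity-checked against a brute-force `max` over the same 1-based, inclusive ranges:

# Brute-force verification of the first three queries (before any updates):
A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
assert max(A[3:6]) == 7     # query(4, 6)
assert max(A[6:11]) == 14   # query(7, 11)
assert max(A[6:12]) == 15   # query(7, 12)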
'''simple docstring'''
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class _a (unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ) -> str:
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots" ) )]
            files = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin" ) for f in files )
@slow
@require_flax
class _a (unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-pipe""" , safety_checker=A__ )
_SCREAMING_SNAKE_CASE = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
_SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
_SCREAMING_SNAKE_CASE = 4
_SCREAMING_SNAKE_CASE = jax.device_count()
_SCREAMING_SNAKE_CASE = num_samples * [prompt]
_SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(A__ )
# shard inputs and rng
_SCREAMING_SNAKE_CASE = replicate(A__ )
_SCREAMING_SNAKE_CASE = jax.random.split(A__ , A__ )
_SCREAMING_SNAKE_CASE = shard(A__ )
_SCREAMING_SNAKE_CASE = pipeline(A__ , A__ , A__ , A__ , jit=A__ ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.151_4745 ) < 1E-3
assert np.abs(np.abs(A__ , dtype=np.floataa ).sum() - 4_99_47.8_75 ) < 5E-1
_SCREAMING_SNAKE_CASE = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(A__ ) == num_samples
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""flax""" , safety_checker=A__ )
_SCREAMING_SNAKE_CASE = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
_SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
_SCREAMING_SNAKE_CASE = 50
_SCREAMING_SNAKE_CASE = jax.device_count()
_SCREAMING_SNAKE_CASE = num_samples * [prompt]
_SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(A__ )
# shard inputs and rng
_SCREAMING_SNAKE_CASE = replicate(A__ )
_SCREAMING_SNAKE_CASE = jax.random.split(A__ , A__ )
_SCREAMING_SNAKE_CASE = shard(A__ )
_SCREAMING_SNAKE_CASE = pipeline(A__ , A__ , A__ , A__ , jit=A__ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0565_2401) ) < 1E-3
assert np.abs((np.abs(A__ , dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5E-1
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=A__ )
_SCREAMING_SNAKE_CASE = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
_SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
_SCREAMING_SNAKE_CASE = 50
_SCREAMING_SNAKE_CASE = jax.device_count()
_SCREAMING_SNAKE_CASE = num_samples * [prompt]
_SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(A__ )
# shard inputs and rng
_SCREAMING_SNAKE_CASE = replicate(A__ )
_SCREAMING_SNAKE_CASE = jax.random.split(A__ , A__ )
_SCREAMING_SNAKE_CASE = shard(A__ )
_SCREAMING_SNAKE_CASE = pipeline(A__ , A__ , A__ , A__ , jit=A__ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0400_3906) ) < 1E-3
assert np.abs((np.abs(A__ , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def UpperCamelCase ( self ) -> int:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa )
_SCREAMING_SNAKE_CASE = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
_SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
_SCREAMING_SNAKE_CASE = 50
_SCREAMING_SNAKE_CASE = jax.device_count()
_SCREAMING_SNAKE_CASE = num_samples * [prompt]
_SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(A__ )
# shard inputs and rng
_SCREAMING_SNAKE_CASE = replicate(A__ )
_SCREAMING_SNAKE_CASE = jax.random.split(A__ , A__ )
_SCREAMING_SNAKE_CASE = shard(A__ )
_SCREAMING_SNAKE_CASE = pipeline(A__ , A__ , A__ , A__ , jit=A__ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0400_3906) ) < 1E-3
assert np.abs((np.abs(A__ , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def UpperCamelCase ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = FlaxDDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , set_alpha_to_one=A__ , steps_offset=1 , )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , scheduler=A__ , safety_checker=A__ , )
_SCREAMING_SNAKE_CASE = scheduler.create_state()
_SCREAMING_SNAKE_CASE = scheduler_state
_SCREAMING_SNAKE_CASE = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
_SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
_SCREAMING_SNAKE_CASE = 50
_SCREAMING_SNAKE_CASE = jax.device_count()
_SCREAMING_SNAKE_CASE = num_samples * [prompt]
_SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(A__ )
# shard inputs and rng
_SCREAMING_SNAKE_CASE = replicate(A__ )
_SCREAMING_SNAKE_CASE = jax.random.split(A__ , A__ )
_SCREAMING_SNAKE_CASE = shard(A__ )
_SCREAMING_SNAKE_CASE = pipeline(A__ , A__ , A__ , A__ , jit=A__ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4504_3945) ) < 1E-3
assert np.abs((np.abs(A__ , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5E-1
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
_SCREAMING_SNAKE_CASE = jax.device_count()
_SCREAMING_SNAKE_CASE = num_samples * [prompt]
_SCREAMING_SNAKE_CASE = jax.random.split(jax.random.PRNGKey(0 ) , A__ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=A__ , )
_SCREAMING_SNAKE_CASE = replicate(A__ )
_SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(A__ )
_SCREAMING_SNAKE_CASE = shard(A__ )
_SCREAMING_SNAKE_CASE = pipeline(A__ , A__ , A__ , jit=A__ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
_SCREAMING_SNAKE_CASE = images[2, 0, 2_56, 10:17, 1]
# With memory efficient attention
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=A__ , use_memory_efficient_attention=A__ , )
_SCREAMING_SNAKE_CASE = replicate(A__ )
_SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(A__ )
_SCREAMING_SNAKE_CASE = shard(A__ )
_SCREAMING_SNAKE_CASE = pipeline(A__ , A__ , A__ , jit=A__ ).images
assert images_eff.shape == (num_samples, 1, 5_12, 5_12, 3)
_SCREAMING_SNAKE_CASE = images[2, 0, 2_56, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
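All of the slow tests above follow the same data-parallel recipe: `replicate` copies the weights to every device, `shard` splits the batch along a leading device axis, and `jit=True` runs the pipeline under `pmap`. A minimal sketch of that recipe with a toy function in place of a diffusion pipeline:

# Toy pmap example: one batch slice per device, computed in parallel.
import jax
import jax.numpy as jnp
import numpy as np

n_devices = jax.device_count()
batch = np.arange(n_devices * 4, dtype=np.float32).reshape(n_devices, 4)  # "shard": leading axis = devices

@jax.pmap
def step(x):
    return x * 2  # stand-in for one denoising step per device

out = step(jnp.asarray(batch))  # shape (n_devices, 4)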
| 718 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _a (_lowerCamelCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ''
SCREAMING_SNAKE_CASE = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , A__ = None , A__ = None , **A__ , ) -> Optional[int]:
super().__init__(self , **A__ )
_SCREAMING_SNAKE_CASE = repo_info
_SCREAMING_SNAKE_CASE = token
_SCREAMING_SNAKE_CASE = None
def UpperCamelCase ( self ) -> Tuple:
if self.dir_cache is None:
_SCREAMING_SNAKE_CASE = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
_SCREAMING_SNAKE_CASE = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(A__ ): {"""name""": str(A__ ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
    def _open( self , path , mode = "rb" , **kwargs ):
        if not isinstance(self.repo_info , DatasetInfo ):
            raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
        url = hf_hub_url(self.repo_info.id , path , revision=self.repo_info.sha )
        return fsspec.open(
            url , mode=mode , headers=get_authentication_headers_for_url(url , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
    def info( self , path , **kwargs ):
        self._get_dirs()
        path = self._strip_protocol(path )
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path )
    def ls( self , path , detail=False , **kwargs ):
        self._get_dirs()
        path = PurePosixPath(path.strip("""/""" ) )
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("""/""" ) )
            root = p.parent
            if root == path:
                paths[str(p )] = f
        out = list(paths.values() )
        if detail:
            return out
        else:
            return sorted(f["""name"""] for f in out )
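The _get_dirs cache above derives a directory entry for every ancestor of each file path; a standalone sketch of that derivation (the helper name is mine):

from pathlib import PurePosixPath

def build_dir_cache(filenames):
    """Map each file to a 'file' entry and every ancestor directory to a 'directory' entry."""
    cache = {}
    for name in filenames:
        cache[name] = {"name": name, "size": None, "type": "file"}
        # PurePosixPath("a/b/c").parents -> (a/b, a, .); [:-1] drops the "." root
        for d in list(PurePosixPath(name).parents)[:-1]:
            cache[str(d)] = {"name": str(d), "size": None, "type": "directory"}
    return cache

cache = build_dir_cache(["data/train/part0.parquet", "README.md"])
assert cache["data"]["type"] == "directory"
assert cache["README.md"]["type"] == "file"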
| 0 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {}
class __UpperCamelCase ( PretrainedConfig ):
"""simple docstring"""
    model_type = '''llama'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    def __init__( self , vocab_size=3_2000 , hidden_size=4096 , intermediate_size=1_1008 , num_hidden_layers=32 , num_attention_heads=32 , num_key_value_heads=None , hidden_act="silu" , max_position_embeddings=2048 , initializer_range=0.02 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , pretraining_tp=1 , tie_word_embeddings=False , rope_scaling=None , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
    def _rope_scaling_validation( self ):
"""simple docstring"""
if self.rope_scaling is None:
return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
F'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get('''type''' , None )
        rope_scaling_factor = self.rope_scaling.get('''factor''' , None )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                F'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(F'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
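For reference, a standalone checker mirroring the validation above (the function name is mine; it accepts exactly the {'type': ..., 'factor': ...} shape the method enforces):

def validate_rope_scaling(rope_scaling):
    """Accept None, or a dict {'type': 'linear'|'dynamic', 'factor': float > 1}."""
    if rope_scaling is None:
        return
    if not isinstance(rope_scaling, dict) or len(rope_scaling) != 2:
        raise ValueError(f"`rope_scaling` must be a dictionary with two fields, `type` and `factor`, got {rope_scaling}")
    rope_type = rope_scaling.get("type")
    factor = rope_scaling.get("factor")
    if rope_type not in ("linear", "dynamic"):
        raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_type}")
    if not isinstance(factor, float) or factor <= 1.0:
        raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {factor}")

validate_rope_scaling({"type": "linear", "factor": 2.0})  # passes
validate_rope_scaling(None)                               # passes: scaling disabled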
| 74 | """simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main() -> None:
    parser = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' )
    commands_parser = parser.add_subparsers(help='''diffusers-cli command helpers''' )
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
| 434 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_poolformer""": [
"""POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""PoolFormerConfig""",
"""PoolFormerOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Any = ["""PoolFormerFeatureExtractor"""]
lowerCAmelCase : Tuple = ["""PoolFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_poolformer"""] = [
"""POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PoolFormerForImageClassification""",
"""PoolFormerModel""",
"""PoolFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 715 |
'''simple docstring'''
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817E-34 # unit of ℏ : J * s
SPEED_OF_LIGHT = 3E8 # unit of c : m * s^-1
def _A (force: float , area: float , distance: float ) -> dict[str, float]:
    if (force, area, distance).count(0 ) != 1:
        raise ValueError("One and only one argument must be 0" )
    if force < 0:
        raise ValueError("Magnitude of force can not be negative" )
    if distance < 0:
        raise ValueError("Distance can not be negative" )
    if area < 0:
        raise ValueError("Area can not be negative" )
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            2_4_0 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (2_4_0 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (2_4_0 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
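A quick numeric check of the force branch above, F = π²ħcA / (240 d⁴); the plate area and separation are arbitrary illustrative values:

from math import pi

hbar = 1.054571817E-34  # J * s
c = 3E8                 # m / s
area = 4E-4             # m^2 (illustrative)
distance = 1E-6         # m (illustrative)
force = (hbar * c * pi**2 * area) / (240 * distance**4)
print(f"Casimir force: {force:.3e} N")  # ~5.2e-07 N for these values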
| 425 | 0 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class A :
'''simple docstring'''
A__ = None
A__ = False
A__ = False
A__ = False
A__ = None
A__ = None
A__ = False
A__ = False
A__ = False
A__ = True
A__ = None
A__ = 1
A__ = None
A__ = False
A__ = None
A__ = None
def lowerCamelCase__ (self : int ) -> "DownloadConfig":
"""simple docstring"""
        return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
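The per-field deep copy above prevents a clone from aliasing nested mutables; a tiny self-contained illustration (the class and field names are mine):

import copy
from dataclasses import dataclass, field

@dataclass
class Config:
    retries: int = 1
    headers: dict = field(default_factory=dict)

    def clone(self) -> "Config":
        # Rebuild from deep copies so nested mutables are not shared.
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})

a = Config(headers={"x": "1"})
b = a.clone()
b.headers["x"] = "2"
assert a.headers["x"] == "1"  # the clone did not alias the dict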
| 15 |
"""simple docstring"""
import qiskit
def single_qubit_measure( qubits: int , classical_bits: int ) -> qiskit.result.counts.Counts:
    '''simple docstring'''
    simulator = qiskit.Aer.get_backend('''aer_simulator''' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0 )
    circuit.x(1 )
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1] , [0, 1] )
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit , simulator , shots=10_00 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(f'''Total count for various states are: {counts}''')
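Both qubits are flipped deterministically by the X gates, so the aer_simulator histogram is concentrated on a single state; the run above should therefore print:

# Total count for various states are: {'11': 1000}
assert single_qubit_measure(2, 2) == {"11": 1000}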
| 698 | 0 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
lowercase = logging.getLogger(__name__)
class A_ :
def __init__( self : Dict ) -> List[Any]:
__magic_name__ = False
def _snake_case ( self : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : Any ) -> int:
if not self.initialized:
__magic_name__ = RagRetriever(
__lowerCamelCase , question_encoder_tokenizer=__lowerCamelCase , generator_tokenizer=__lowerCamelCase , index=__lowerCamelCase , init_retrieval=__lowerCamelCase , )
__magic_name__ = True
def _snake_case ( self : Dict ) -> List[str]:
self.retriever.index.init_index()
def _snake_case ( self : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ) -> Optional[Any]:
__magic_name__ , __magic_name__ = self.retriever._main_retrieve(__lowerCamelCase , __lowerCamelCase )
return doc_ids, retrieved_doc_embeds
class A_ ( RagRetriever ):
def __init__( self : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : Any=None ) -> Union[str, Any]:
if index is not None and index.is_initialized() and len(__lowerCamelCase ) > 0:
raise ValueError(
"When using Ray for distributed fine-tuning, "
"you'll need to provide the paths instead, "
"as the dataset and the index are loaded "
"separately. More info in examples/rag/use_own_knowledge_dataset.py " )
super().__init__(
__lowerCamelCase , question_encoder_tokenizer=__lowerCamelCase , generator_tokenizer=__lowerCamelCase , index=__lowerCamelCase , init_retrieval=__lowerCamelCase , )
__magic_name__ = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
for worker in self.retrieval_workers
] )
def _snake_case ( self : List[str] ) -> List[str]:
logger.info("initializing retrieval" )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def _snake_case ( self : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] ) -> List[str]:
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
__magic_name__ = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
__magic_name__ , __magic_name__ = ray.get(random_worker.retrieve.remote(__lowerCamelCase , __lowerCamelCase ) )
else:
__magic_name__ , __magic_name__ = self._main_retrieve(__lowerCamelCase , __lowerCamelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__lowerCamelCase )
@classmethod
def _snake_case ( cls : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : int=None , **__lowerCamelCase : List[str] ) -> List[Any]:
return super(__lowerCamelCase , cls ).get_tokenizers(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
@classmethod
def _snake_case ( cls : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str]=None , **__lowerCamelCase : Dict ) -> Tuple:
__magic_name__ = kwargs.pop("config" , __lowerCamelCase ) or RagConfig.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
__magic_name__ = RagTokenizer.from_pretrained(__lowerCamelCase , config=__lowerCamelCase )
__magic_name__ = rag_tokenizer.question_encoder
__magic_name__ = rag_tokenizer.generator
if indexed_dataset is not None:
__magic_name__ = "custom"
__magic_name__ = CustomHFIndex(config.retrieval_vector_size , __lowerCamelCase )
else:
__magic_name__ = cls._build_index(__lowerCamelCase )
return cls(
__lowerCamelCase , question_encoder_tokenizer=__lowerCamelCase , generator_tokenizer=__lowerCamelCase , retrieval_workers=__lowerCamelCase , index=__lowerCamelCase , )
| 710 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float , y: float , max_step: int ) -> float:
    '''simple docstring'''
    a = x
    b = y
    for step in range(max_step ):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex numbers with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float ):
    '''simple docstring'''
    if distance == 1:
        return (0, 0, 0)
    else:
        return (2_5_5, 2_5_5, 2_5_5)
def get_color_coded_rgb(distance: float ):
    '''simple docstring'''
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 2_5_5 ) for i in colorsys.hsv_to_rgb(distance , 1 , 1 ) )
def get_image(image_width: int = 8_0_0 , image_height: int = 6_0_0 , figure_center_x: float = -0.6 , figure_center_y: float = 0 , figure_width: float = 3.2 , max_step: int = 5_0 , use_distance_color_coding: bool = True , ):
    '''simple docstring'''
    img = Image.new("RGB" , (image_width, image_height) )
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x , figure_y , max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
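Two quick spot checks of the escape-time logic in get_distance (the expected values follow directly from the update z -> z**2 + c):

print(get_distance(0.0, 0.0, 50))  # 1.0: the origin never escapes
print(get_distance(1.0, 1.0, 50))  # 0.0: |z|^2 = 10 > 4 after one step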
| 468 | 0 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class A_ :
def __init__( self: Dict ,__lowerCAmelCase: str ,__lowerCAmelCase: List[str]=13 ,__lowerCAmelCase: int=2 ,__lowerCAmelCase: Dict=24 ,__lowerCAmelCase: int=16 ,__lowerCAmelCase: Dict=True ,__lowerCAmelCase: Optional[int]=True ,__lowerCAmelCase: Optional[int]=32 ,__lowerCAmelCase: Optional[int]=5 ,__lowerCAmelCase: Optional[Any]=4 ,__lowerCAmelCase: Optional[Any]=37 ,__lowerCAmelCase: str="gelu" ,__lowerCAmelCase: Optional[Any]=0.1 ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: List[Any]=10 ,__lowerCAmelCase: Tuple=0.02 ,__lowerCAmelCase: Any=None ,__lowerCAmelCase: Dict=2 ,__lowerCAmelCase: Dict=2 ,):
'''simple docstring'''
_lowerCamelCase : Any = parent
_lowerCamelCase : Optional[Any] = batch_size
_lowerCamelCase : Optional[Any] = patch_size
_lowerCamelCase : Any = max_length
_lowerCamelCase : Any = num_mel_bins
_lowerCamelCase : int = is_training
_lowerCamelCase : Union[str, Any] = use_labels
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : List[str] = num_hidden_layers
_lowerCamelCase : Union[str, Any] = num_attention_heads
_lowerCamelCase : Union[str, Any] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Optional[int] = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCamelCase : List[str] = type_sequence_label_size
_lowerCamelCase : str = initializer_range
_lowerCamelCase : int = scope
_lowerCamelCase : Any = frequency_stride
_lowerCamelCase : Optional[int] = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_lowerCamelCase : Optional[int] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
_lowerCamelCase : str = (self.max_length - self.patch_size) // self.time_stride + 1
_lowerCamelCase : List[Any] = frequency_out_dimension * time_out_dimension
_lowerCamelCase : Optional[Any] = num_patches + 2
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Dict = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
_lowerCamelCase : int = None
if self.use_labels:
_lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowerCamelCase : List[Any] = self.get_config()
return config, input_values, labels
def _lowercase ( self: str ):
'''simple docstring'''
return ASTConfig(
patch_size=self.patch_size ,max_length=self.max_length ,num_mel_bins=self.num_mel_bins ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=__lowerCAmelCase ,initializer_range=self.initializer_range ,frequency_stride=self.frequency_stride ,time_stride=self.time_stride ,)
def _lowercase ( self: int ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Dict ,__lowerCAmelCase: int ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = ASTModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : int = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
return config, inputs_dict
@require_torch
class A_ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowerCAmelCase__ = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: int ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: int ,__lowerCAmelCase: List[str] ):
'''simple docstring'''
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : str = ASTModelTester(self )
_lowerCamelCase : Union[str, Any] = ConfigTester(self ,config_class=__lowerCAmelCase ,has_text_modality=__lowerCAmelCase ,hidden_size=37 )
def _lowercase ( self: List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="AST does not use inputs_embeds" )
def _lowercase ( self: Tuple ):
'''simple docstring'''
pass
def _lowercase ( self: Dict ):
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x ,nn.Linear ) )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1] ,expected_arg_names )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
@slow
def _lowercase ( self: List[str] ):
'''simple docstring'''
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Any = ASTModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def prepare_audio():
    '''simple docstring'''
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" )
    audio, sampling_rate = torchaudio.load(filepath )
    return audio, sampling_rate
@require_torch
@require_torchaudio
class A_ ( unittest.TestCase ):
@cached_property
def _lowercase ( self: Dict ):
'''simple docstring'''
return (
ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" )
if is_torchaudio_available()
else None
)
@slow
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : str = self.default_feature_extractor
_lowerCamelCase : Optional[Any] = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ).to(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = self.default_feature_extractor
        audio, sampling_rate = prepare_audio()
_lowerCamelCase : int = audio.squeeze().numpy()
_lowerCamelCase : List[str] = feature_extractor(__lowerCAmelCase ,sampling_rate=__lowerCAmelCase ,return_tensors="pt" ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowerCamelCase : str = model(**__lowerCAmelCase )
# verify the logits
_lowerCamelCase : str = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape ,__lowerCAmelCase )
_lowerCamelCase : List[Any] = torch.tensor([-0.87_60, -7.00_42, -8.66_02] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__lowerCAmelCase ,atol=1e-4 ) ) | 46 |
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search( left: int , right: int , array: list[int] , target: int ) -> int:
    '''simple docstring'''
    for i in range(left , right ):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search( array: list[int] , target: int ) -> int:
    '''simple docstring'''
    left = 0
    right = len(array )
    while left <= right:
        if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search( left: int , right: int , array: list[int] , target: int ) -> int:
    '''simple docstring'''
    if left < right:
        if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left , one_third - 1 , array , target )
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1 , right , array , target )
        else:
            return rec_ternary_search(one_third + 1 , two_third - 1 , array , target )
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by comma:\n''').strip()
    collection = [int(item.strip()) for item in user_input.split(''',''')]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input('''Enter the number to be found in the list:\n''').strip())
    resulta = ite_ternary_search(collection, target)
    resultb = rec_ternary_search(0, len(collection) - 1, collection, target)
    if resulta != -1:
        print(f'''Iterative search: {target} found at position: {resulta}''')
        print(f'''Recursive search: {target} found at position: {resultb}''')
    else:
        print('''Not found''') | 46 | 1 |
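A non-interactive spot check of both variants (the demo list is mine; it must be sorted, as the assertion above enforces):

_demo = [1, 3, 5, 7, 9, 11]
assert ite_ternary_search(_demo, 9) == rec_ternary_search(0, len(_demo) - 1, _demo, 9) == 4
assert ite_ternary_search(_demo, 2) == -1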
"""simple docstring"""
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def unet(hor ) -> Union[str, Any]:
    """simple docstring"""
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f'/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch' )
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 6_5536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNetaDModel(**config )
    print(f'length of state dict: {len(state_dict.keys() )}' )
    print(f'length of value function dict: {len(hf_value_function.state_dict().keys() )}' )
    mapping = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
    torch.save(hf_value_function.state_dict() , f'hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin' )
    with open(f'hub/hopper-medium-v2/unet/hor{hor}/config.json' , "w" ) as f:
        json.dump(config , f )
def value_function() -> str:
    """simple docstring"""
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 6_5536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }
    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch" )
    state_dict = model
    hf_value_function = UNetaDModel(**config )
    print(f'length of state dict: {len(state_dict.keys() )}' )
    print(f'length of value function dict: {len(hf_value_function.state_dict().keys() )}' )
    mapping = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
    torch.save(hf_value_function.state_dict() , "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin" )
    with open("hub/hopper-medium-v2/value_function/config.json" , "w" ) as f:
        json.dump(config , f )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
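Both conversions above remap checkpoint keys purely by position via zip, which relies on the two state dicts iterating in the same insertion order; a tiny illustration of that trick with made-up keys:

import torch

src = {"blocks.0.w": torch.zeros(2), "blocks.0.b": torch.ones(2)}
dst_keys = ["down_blocks.0.weight", "down_blocks.0.bias"]
mapping = dict(zip(src.keys(), dst_keys))  # positional pairing, order-sensitive
remapped = {mapping[k]: v for k, v in src.items()}
assert set(remapped) == set(dst_keys)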
| 703 |
"""simple docstring"""
def prime_sieve_eratosthenes(num ) -> list[int]:
    """simple docstring"""
    if num <= 0:
        raise ValueError("Input must be a positive integer" )
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p , num + 1 , p ):
                primes[i] = False
        p += 1
    return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input('''Enter a positive integer: ''').strip())
print(prime_sieve_eratosthenes(user_num))
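A quick check of the sieve (primes up to 30):

assert prime_sieve_eratosthenes(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]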
| 274 | 0 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowerCAmelCase__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Any ) -> Tuple:
# A mock response for an HTTP head request to emulate server down
A = mock.Mock()
A = 500
A = {}
A = HTTPError
A = {}
# Download this model to make sure it's in the cache.
A = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=__UpperCamelCase ) as mock_head:
A = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
# A mock response for an HTTP head request to emulate server down
A = mock.Mock()
A = 500
A = {}
A = HTTPError
A = {}
# Download this model to make sure it's in the cache.
A = GPTaTokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=__UpperCamelCase ) as mock_head:
A = GPTaTokenizerFast.from_pretrained('gpt2' )
# This check we did call the fake head request
mock_head.assert_called()
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
# This test is for deprecated behavior and can be removed in v5
try:
A = tempfile.mktemp()
with open(__UpperCamelCase , 'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , __UpperCamelCase )
A = AlbertTokenizer.from_pretrained(__UpperCamelCase )
finally:
os.remove(__UpperCamelCase )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' , 'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , __UpperCamelCase )
A = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def __UpperCamelCase ( self : str ) -> int:
# This test is for deprecated behavior and can be removed in v5
A = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class lowerCAmelCase__ ( unittest.TestCase ):
    vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def __UpperCamelCase ( cls : Tuple ) -> Dict:
A = TOKEN
HfFolder.save_token(__UpperCamelCase )
@classmethod
def __UpperCamelCase ( cls : Any ) -> Dict:
try:
delete_repo(token=cls._token , repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
with tempfile.TemporaryDirectory() as tmp_dir:
A = os.path.join(__UpperCamelCase , 'vocab.txt' )
with open(__UpperCamelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
A = BertTokenizer(__UpperCamelCase )
tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token )
A = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__UpperCamelCase , repo_id='test-tokenizer' , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
A = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def __UpperCamelCase ( self : List[str] ) -> Tuple:
with tempfile.TemporaryDirectory() as tmp_dir:
A = os.path.join(__UpperCamelCase , 'vocab.txt' )
with open(__UpperCamelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
A = BertTokenizer(__UpperCamelCase )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token )
A = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
__UpperCamelCase , repo_id='valid_org/test-tokenizer-org' , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
A = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
A = os.path.join(__UpperCamelCase , 'vocab.txt' )
with open(__UpperCamelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
A = CustomTokenizer(__UpperCamelCase )
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
A = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=__UpperCamelCase )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
A = os.path.join(__UpperCamelCase , 'vocab.txt' )
with open(__UpperCamelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
A = BertTokenizerFast.from_pretrained(__UpperCamelCase )
bert_tokenizer.save_pretrained(__UpperCamelCase )
A = CustomTokenizerFast.from_pretrained(__UpperCamelCase )
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
A = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=__UpperCamelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast' )
A = AutoTokenizer.from_pretrained(
f'''{USER}/test-dynamic-tokenizer''' , use_fast=__UpperCamelCase , trust_remote_code=__UpperCamelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
class lowerCAmelCase__ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[Any] ) -> Any:
A = Trie()
trie.add('Hello 友達' )
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
trie.add('Hello' )
trie.data
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )
def __UpperCamelCase ( self : Dict ) -> Tuple:
A = Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS] This is a extra_id_100'] )
trie.add('[CLS]' )
trie.add('extra_id_1' )
trie.add('extra_id_100' )
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS]', ' This is a ', 'extra_id_100'] )
def __UpperCamelCase ( self : Any ) -> Optional[int]:
A = Trie()
trie.add('A' )
self.assertEqual(trie.split('ABC' ) , ['A', 'BC'] )
self.assertEqual(trie.split('BCA' ) , ['BC', 'A'] )
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
A = Trie()
trie.add('TOKEN]' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
A = Trie()
trie.add('A' )
trie.add('P' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def __UpperCamelCase ( self : List[str] ) -> Tuple:
A = Trie()
trie.add('AB' )
trie.add('B' )
trie.add('C' )
self.assertEqual(trie.split('ABC' ) , ['AB', 'C'] )
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
A = Trie()
trie.add('ABC' )
trie.add('B' )
trie.add('CD' )
self.assertEqual(trie.split('ABCD' ) , ['ABC', 'D'] )
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
# Even if the offsets are wrong, we necessarily output correct string
# parts.
A = Trie()
A = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3] )
self.assertEqual(__UpperCamelCase , ['AB', 'C'] ) | 106 | '''simple docstring'''
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch ):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set() )


@pytest.fixture
def mock_hfh(monkeypatch ):
    class MetricMock:
        """simple docstring"""

        def __init__( self , metric_id ):
            """simple docstring"""
            self.id = metric_id

    class HfhMock:
        """simple docstring"""

        _metrics = [MetricMock(metric_id ) for metric_id in ['''accuracy''', '''mse''', '''precision''', '''codeparrot/apps_metric''']]

        def list_metrics( self ):
            """simple docstring"""
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock() )
@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def test_metric_deprecation_warning(func , args , mock_emitted_deprecation_warnings , mock_hfh , tmp_path ):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
    with pytest.warns(FutureWarning , match="https://huggingface.co/docs/evaluate" ):
        func(*args )
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args ):
    '''simple docstring'''
    return ConvertCommand(
        args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
UpperCamelCase_ = '\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'
class snake_case_ ( BaseTransformersCLICommand ):
'''simple docstring'''
@staticmethod
    def register_subcommand(parser ):
        train_parser = parser.add_parser(
"convert", help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.", )
train_parser.add_argument("--model_type", type=A_, required=A_, help="Model's type." )
train_parser.add_argument(
"--tf_checkpoint", type=A_, required=A_, help="TensorFlow checkpoint path or folder." )
train_parser.add_argument(
"--pytorch_dump_output", type=A_, required=A_, help="Path to the PyTorch saved model output." )
train_parser.add_argument("--config", type=A_, default="", help="Configuration file path or folder." )
train_parser.add_argument(
"--finetuning_task_name", type=A_, default=A_, help="Optional fine-tuning task name if the TF model was a finetuned model.", )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__( self, model_type, tf_checkpoint, pytorch_dump_output, config, finetuning_task_name, *args, ):
        self._logger = logging.get_logger("transformers-cli/converting" )
        self._logger.info(f"""Loading model {model_type}""" )
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run( self ):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(A_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(A_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(A_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(A_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(A_ )
if "ckpt" in self._tf_checkpoint.lower():
UpperCAmelCase__ =self._tf_checkpoint
UpperCAmelCase__ =""
else:
UpperCAmelCase__ =self._tf_checkpoint
UpperCAmelCase__ =""
convert_transfo_xl_checkpoint_to_pytorch(
A_, self._config, self._pytorch_dump_output, A_ )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(A_ )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(A_ )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output )
else:
raise ValueError(
"--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
| 510 |
from __future__ import annotations
from math import gcd
def pollard_rho(num: int , seed: int = 2 , step: int = 1 , attempts: int = 3 , ) -> int | None:
'''simple docstring'''
if num < 2:
raise ValueError("The input value cannot be less than 2" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int , step: int , modulus: int ) -> int:
        return (pow(value , 2 ) + step) % modulus

    for _ in range(attempts ):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise , step , num )
            hare = rand_fn(hare , step , num )
            hare = rand_fn(hare , step , num )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise , num )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
'num',
type=int,
help='The value to find a divisor of',
)
parser.add_argument(
'--attempts',
type=int,
default=3,
help='The number of attempts before giving up',
)
UpperCamelCase_ = parser.parse_args()
UpperCamelCase_ = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f"""{args.num} is probably prime""")
else:
UpperCamelCase_ = args.num // divisor
print(f"""{args.num} = {divisor} * {quotient}""")
| 510 | 1 |