| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82 53.2k | int64 0 721 | stringlengths 91 41.9k | int64 0 699 | int64 0 1 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        XLMForMultipleChoice,
        XLMForQuestionAnswering,
        XLMForQuestionAnsweringSimple,
        XLMForSequenceClassification,
        XLMForTokenClassification,
        XLMModel,
        XLMWithLMHeadModel,
    )
    from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )
    def create_and_check_xlm_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_xlm_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_xlm_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()

        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_xlm_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )

        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)

        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
    def create_and_check_xlm_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_xlm_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_xlm_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
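# For reference, a minimal sketch of the `ids_tensor` / `random_attention_mask`
# helpers imported from test_modeling_common above (simplified, assumed signatures;
# the real helpers also accept an optional rng/name):
#
#     def ids_tensor(shape, vocab_size):
#         return torch.randint(0, vocab_size, tuple(shape), dtype=torch.long, device=torch_device)
#
#     def random_attention_mask(shape):
#         mask = ids_tensor(shape, vocab_size=2)
#         mask[:, -1] = 1  # make sure at least one token is attended to
#         return mask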
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
            )
    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )
@slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
@slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
        ]  # "the president" repeated ten times
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids) | 327 |
import unittest

from transformers import DonutProcessor

DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }

        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence)

        self.assertDictEqual(actual_json, expected_json) | 327 | 1 |
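# Quick illustration of the DonutProcessor mapping exercised in the test above
# (single-tag case; nested tags and <sep/>-separated lists compose the same way):
#
#     processor.token2json("<s_name>John Doe</s_name>")  # -> {"name": "John Doe"}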
"""simple docstring"""
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
    import jax

    from transformers.models.big_bird.modeling_flax_big_bird import (
        FlaxBigBirdForCausalLM,
        FlaxBigBirdForMaskedLM,
        FlaxBigBirdForMultipleChoice,
        FlaxBigBirdForPreTraining,
        FlaxBigBirdForQuestionAnswering,
        FlaxBigBirdForSequenceClassification,
        FlaxBigBirdForTokenClassification,
        FlaxBigBirdModel,
    )
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
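    # Note: `jax.disable_jit()` above runs the identical callable eagerly, so the
    # JIT-enabled / JIT-disabled subtests compare compiled and uncompiled outputs
    # of the same function.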
    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
| 78 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_patch16_224",
        type=str,
        help="Name of the ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
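# Example invocation (the script filename is illustrative; both flags are defined above):
#
#     python convert_vit_timm_to_pytorch.py \
#         --vit_name vit_base_patch16_224 \
#         --pytorch_dump_folder_path ./vit-base-patch16-224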
| 78 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

if is_pytesseract_available():
    from PIL import Image

    from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
def lowerCamelCase ( self : int ):
# with apply_OCR = True
snake_case__ : Optional[int] = LayoutLMvaImageProcessor()
from datasets import load_dataset
snake_case__ : List[str] = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
snake_case__ : int = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
snake_case__ : Dict = image_processing(snake_case_ , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
snake_case__ : List[Any] = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
snake_case__ : Optional[Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 
562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , snake_case_ )
self.assertListEqual(encoding.boxes , snake_case_ )
# with apply_OCR = False
snake_case__ : Dict = LayoutLMvaImageProcessor(apply_ocr=snake_case_ )
snake_case__ : List[str] = image_processing(snake_case_ , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
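        # Usage note: with apply_ocr=False (exercised just above) the processor skips
        # the pytesseract pass entirely and returns only pixel_values, so callers are
        # expected to supply words and boxes themselves.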
| 374 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
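# Silence TensorFlow's C++ logging before it is imported below ("3" suppresses
# INFO/WARNING/ERROR); the assignment target on the next line is assumed, as the
# original name was mangled in this copy.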
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
| 374 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
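# Consumer-side sketch (illustrative module path): with the lazy module installed,
#
#     from transformers.models.conditional_detr import ConditionalDetrModel
#
# resolves through _LazyModule, so the heavy torch import only happens when the
# attribute is first accessed rather than at package-import time.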
| 713 |
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}
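# Sequence being computed (Project Euler 551): a(1) = 1 and
# a(n) = a(n-1) + digitsum(a(n-1)), i.e. 1, 2, 4, 8, 16, 23, 28, 38, 49, 62, ...
# Terms are stored as little-endian digit lists so that jumps over many terms can
# be cached in `memo`, keyed by digitsum(b) for a term split as b * 10**k + c.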
def next_term(a_i, k, i, n):
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))

    return (diff, dn)
def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i
def add(digits, k, addend):
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
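# A brute-force cross-check for small n (assumes the a(1) = 1 indexing described
# above; far too slow for n = 10**15 but useful to validate `solution`):
def solution_naive(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(digit) for digit in str(a))
    return a  # solution_naive(10) == 62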
if __name__ == "__main__":
    print(f"{solution() = }")
| 139 | 0 |
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    results = dict()
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results
def A_ ( a , a , a ):
"""simple docstring"""
if not check_uniques(a , a ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def A_ ( a ):
"""simple docstring"""
with open(a , 'rb' ) as f_in:
with gzip.open(str(a ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
shutil.copyfileobj(a , a )
os.unlink(a )
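# Self-contained sketch (not in the original) of the gzip round-trip that
# `compress_file` above performs: write a file, compress it to `<name>.gz`
# with compresslevel=6, then delete the original. The temp path is
# hypothetical.
import gzip as _gzip
import shutil as _shutil
import tempfile as _tempfile
from pathlib import Path as _Path
def _compress_demo():
    path = _Path(_tempfile.mkdtemp()) / 'file-000000000001.json'
    path.write_text('{"content": "print(1)"}\n')
    with open(path, 'rb') as f_in:
        with _gzip.open(str(path) + '.gz', 'wb', compresslevel=6) as f_out:
            _shutil.copyfileobj(f_in, f_out)
    path.unlink()  # same effect as os.unlink in the function above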
# Settings
lowerCAmelCase : List[Any] = HfArgumentParser(PreprocessingArguments)
lowerCAmelCase : str = parser.parse_args()
if args.num_workers is None:
lowerCAmelCase : Any = multiprocessing.cpu_count()
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
lowerCAmelCase : Tuple = time.time()
lowerCAmelCase : Dict = load_dataset(args.dataset_name, split='train')
print(F'Time to load dataset: {time.time()-t_start:.2f}')
# Run preprocessing
lowerCAmelCase : Dict = time.time()
lowerCAmelCase : Any = ds.map(preprocess, num_proc=args.num_workers)
print(F'Time to preprocess dataset: {time.time()-t_start:.2f}')
# Deduplicate hashes
lowerCAmelCase : List[Any] = set(ds.unique('hash'))
lowerCAmelCase : Dict = len(uniques) / len(ds)
print(F'Fraction of duplicates: {1-frac:.2%}')
# Deduplicate data and apply heuristics
lowerCAmelCase : str = time.time()
lowerCAmelCase : Optional[int] = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(F'Time to filter dataset: {time.time()-t_start:.2f}')
print(F'Size of filtered dataset: {len(ds_filter)}')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
lowerCAmelCase : Optional[Any] = time.time()
lowerCAmelCase , lowerCAmelCase : Dict = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F'Time to deduplicate dataset: {time.time()-t_start:.2f}')
print(F'Size of deduplicated dataset: {len(ds_filter)}')
# Save data in batches of samples_per_file
lowerCAmelCase : Dict = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
with open(output_dir / 'duplicate_clusters.json', 'w') as f:
json.dump(duplicate_clusters, f)
lowerCAmelCase : Any = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
lowerCAmelCase : Optional[Any] = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
lowerCAmelCase : Optional[Any] = str(data_dir / F'file-{file_number+1:012}.json')
lowerCAmelCase : List[str] = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F'Time to save dataset: {time.time()-t_start:.2f}')
| 511 |
from __future__ import annotations
lowerCAmelCase : List[Any] = list[list[int]]
# assigning initial values to the grid
lowerCAmelCase : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
lowerCAmelCase : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def A_ ( a , a , a , a ):
"""simple docstring"""
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
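# Quick illustrative check (not part of the original): in `initial_grid`
# above, cell (0, 1) is empty; a 4 clashes with the 4 already in row 0,
# while a 1 appears nowhere in its row, column, or 3x3 box.
# assert not is_safe(initial_grid, 0, 1, 4)
# assert is_safe(initial_grid, 0, 1, 1)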
def A_ ( a ):
"""simple docstring"""
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def A_ ( a ):
"""simple docstring"""
if location := find_empty_location(a ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 1_0 ):
if is_safe(a , a , a , a ):
SCREAMING_SNAKE_CASE_ : List[str] = digit
if sudoku(a ) is not None:
return grid
SCREAMING_SNAKE_CASE_ : List[Any] = 0
return None
def A_ ( a ):
"""simple docstring"""
for row in grid:
for cell in row:
print(a , end=' ' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
lowerCAmelCase : Any = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
| 511 | 1 |
from ..utils import DummyObject, requires_backends
class _a (metaclass=__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: Dict = ['''onnx''']
def __init__( self , *A__ , **A__ ):
requires_backends(self , ["""onnx"""] )
@classmethod
def __A ( cls , *A__ , **A__ ):
requires_backends(cls , ["""onnx"""] )
@classmethod
def __A ( cls , *A__ , **A__ ):
requires_backends(cls , ["""onnx"""] )
| 64 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
A_ : Dict = random.Random()
if is_torch_available():
import torch
def UpperCamelCase (lowercase_: Tuple , lowercase_: Tuple=1.0 , lowercase_: Dict=None , lowercase_: int=None ) -> str:
if rng is None:
A__ : Optional[Any] = global_rng
A__ : List[str] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
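# A minimal self-contained sketch of what `floats_list` (used throughout the
# tests below) produces: a shape[0] x shape[1] nested list of random floats
# in [0, scale). The helper name `_floats_list_demo` is hypothetical.
import random as _random
def _floats_list_demo(shape, scale=1.0, rng=None):
    rng = rng or _random.Random(0)  # seeded for reproducibility
    return [[rng.random() * scale for _ in range(shape[1])] for _ in range(shape[0])]
_demo_values = _floats_list_demo((2, 3), scale=2.0)
assert len(_demo_values) == 2 and len(_demo_values[0]) == 3
assert all(0.0 <= v < 2.0 for row in _demo_values for v in row)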
class _a (unittest.TestCase ):
'''simple docstring'''
def __init__( self , A__ , A__=7 , A__=400 , A__=2000 , A__=1 , A__=0.0 , A__=1_6000 , A__=True , A__=True , ):
A__ : Any = parent
A__ : Optional[int] = batch_size
A__ : Union[str, Any] = min_seq_length
A__ : Dict = max_seq_length
A__ : Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
A__ : str = feature_size
A__ : Optional[int] = padding_value
A__ : List[str] = sampling_rate
A__ : List[str] = return_attention_mask
A__ : int = do_normalize
def __A ( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __A ( self , A__=False , A__=False ):
def _flatten(A__ ):
return list(itertools.chain(*A__ ) )
if equal_length:
A__ : Dict = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
A__ : Union[str, Any] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
A__ : Optional[int] = [np.asarray(A__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _a (__magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: int = ASTFeatureExtractor
def __A ( self ):
A__ : Optional[Any] = ASTFeatureExtractionTester(self )
def __A ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
A__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
A__ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
A__ : Optional[Any] = [np.asarray(A__ ) for speech_input in speech_inputs]
# Test not batched input
A__ : Tuple = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
A__ : Tuple = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
# Test batched
A__ : Tuple = feat_extract(A__ , padding=A__ , return_tensors="""np""" ).input_values
A__ : Tuple = feat_extract(A__ , padding=A__ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
A__ : int = [floats_list((1, x) )[0] for x in (800, 800, 800)]
A__ : List[str] = np.asarray(A__ )
A__ : Union[str, Any] = feat_extract(A__ , return_tensors="""np""" ).input_values
A__ : Optional[Any] = feat_extract(A__ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
@require_torch
def __A ( self ):
import torch
A__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A__ : Tuple = np.random.rand(100 ).astype(np.floataa )
A__ : Tuple = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
A__ : List[str] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
A__ : Any = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def __A ( self , A__ ):
from datasets import load_dataset
A__ : str = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
A__ : str = ds.sort("""id""" ).select(range(A__ ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
@require_torch
def __A ( self ):
# fmt: off
A__ : Optional[Any] = torch.tensor(
[-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6,
-1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3,
-1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6,
-0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] )
# fmt: on
A__ : Any = self._load_datasamples(1 )
A__ : Tuple = ASTFeatureExtractor()
A__ : Dict = feature_extractor(A__ , return_tensors="""pt""" ).input_values
self.assertEqual(input_values.shape , (1, 1024, 128) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , A__ , atol=1e-4 ) )
| 64 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : List[str] = {
"""SenseTime/deformable-detr""": """https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json""",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Any = "deformable_detr"
__UpperCamelCase: int = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self : Optional[int] , A : List[Any]=True , A : List[Any]=None , A : Tuple=3 , A : Tuple=300 , A : Tuple=1024 , A : Any=6 , A : str=1024 , A : int=8 , A : Optional[int]=6 , A : Optional[int]=1024 , A : Optional[int]=8 , A : List[Any]=0.0 , A : Dict=True , A : Dict="relu" , A : Tuple=256 , A : List[str]=0.1 , A : List[Any]=0.0 , A : Optional[Any]=0.0 , A : List[str]=0.02 , A : Optional[Any]=1.0 , A : Any=True , A : Optional[int]=False , A : Dict="sine" , A : Optional[int]="resnet50" , A : str=True , A : Union[str, Any]=False , A : Any=4 , A : Dict=4 , A : Optional[Any]=4 , A : Any=False , A : Union[str, Any]=300 , A : List[Any]=False , A : List[Any]=1 , A : Tuple=5 , A : Any=2 , A : Tuple=1 , A : List[Any]=1 , A : List[Any]=5 , A : Any=2 , A : Optional[Any]=0.1 , A : List[str]=0.25 , A : List[str]=False , **A : Union[str, Any] , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_UpperCAmelCase : List[str] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(A , A ):
_UpperCAmelCase : Any = backbone_config.get("model_type" )
_UpperCAmelCase : List[str] = CONFIG_MAPPING[backbone_model_type]
_UpperCAmelCase : List[str] = config_class.from_dict(A )
_UpperCAmelCase : List[str] = use_timm_backbone
_UpperCAmelCase : int = backbone_config
_UpperCAmelCase : Optional[int] = num_channels
_UpperCAmelCase : Tuple = num_queries
_UpperCAmelCase : str = max_position_embeddings
_UpperCAmelCase : List[str] = d_model
_UpperCAmelCase : Optional[int] = encoder_ffn_dim
_UpperCAmelCase : int = encoder_layers
_UpperCAmelCase : Tuple = encoder_attention_heads
_UpperCAmelCase : Any = decoder_ffn_dim
_UpperCAmelCase : Dict = decoder_layers
_UpperCAmelCase : List[str] = decoder_attention_heads
_UpperCAmelCase : Dict = dropout
_UpperCAmelCase : Tuple = attention_dropout
_UpperCAmelCase : int = activation_dropout
_UpperCAmelCase : Optional[int] = activation_function
_UpperCAmelCase : List[Any] = init_std
_UpperCAmelCase : Optional[Any] = init_xavier_std
_UpperCAmelCase : Tuple = encoder_layerdrop
_UpperCAmelCase : str = auxiliary_loss
_UpperCAmelCase : Union[str, Any] = position_embedding_type
_UpperCAmelCase : Optional[int] = backbone
_UpperCAmelCase : Optional[int] = use_pretrained_backbone
_UpperCAmelCase : int = dilation
# deformable attributes
_UpperCAmelCase : Dict = num_feature_levels
_UpperCAmelCase : Union[str, Any] = encoder_n_points
_UpperCAmelCase : List[str] = decoder_n_points
_UpperCAmelCase : Optional[Any] = two_stage
_UpperCAmelCase : int = two_stage_num_proposals
_UpperCAmelCase : Optional[int] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
_UpperCAmelCase : Tuple = class_cost
_UpperCAmelCase : Union[str, Any] = bbox_cost
_UpperCAmelCase : List[Any] = giou_cost
# Loss coefficients
_UpperCAmelCase : Optional[Any] = mask_loss_coefficient
_UpperCAmelCase : Tuple = dice_loss_coefficient
_UpperCAmelCase : Tuple = bbox_loss_coefficient
_UpperCAmelCase : Any = giou_loss_coefficient
_UpperCAmelCase : Optional[int] = eos_coefficient
_UpperCAmelCase : Dict = focal_alpha
_UpperCAmelCase : Optional[Any] = disable_custom_kernels
super().__init__(is_encoder_decoder=A , **A )
@property
def _A ( self : str ):
return self.encoder_attention_heads
@property
def _A ( self : int ):
return self.d_model
def _A ( self : List[str] ):
_UpperCAmelCase : Optional[Any] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
_UpperCAmelCase : Optional[int] = self.backbone_config.to_dict()
_UpperCAmelCase : int = self.__class__.model_type
return output
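# Hedged usage sketch (not in the original file): the class above mirrors
# `transformers.DeformableDetrConfig`, so with the defaults from __init__ the
# attribute_map aliases resolve as below.
# from transformers import DeformableDetrConfig
# cfg = DeformableDetrConfig()
# assert cfg.hidden_size == cfg.d_model == 256
# assert cfg.num_attention_heads == cfg.encoder_attention_heads == 8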
| 244 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__SCREAMING_SNAKE_CASE : List[str] = {"""configuration_glpn""": ["""GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GLPNConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Dict = ["""GLPNFeatureExtractor"""]
__SCREAMING_SNAKE_CASE : str = ["""GLPNImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Dict = [
"""GLPN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GLPNForDepthEstimation""",
"""GLPNLayer""",
"""GLPNModel""",
"""GLPNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 244 | 1 |
'''simple docstring'''
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
A_ = True
except ImportError:
A_ = False
A_ = logging.get_logger(__name__) # pylint: disable=invalid-name
def A ( _UpperCAmelCase : Namespace ) -> Optional[int]:
'''simple docstring'''
return AddNewModelCommand(args.testing ,args.testing_file ,path=args.path )
class UpperCamelCase__ ( lowerCAmelCase__ ):
'''simple docstring'''
@staticmethod
def snake_case ( SCREAMING_SNAKE_CASE ) -> List[str]:
__lowerCAmelCase : Optional[Any] = parser.add_parser('add-new-model' )
add_new_model_parser.add_argument('--testing' , action='store_true' , help='If in testing mode.' )
add_new_model_parser.add_argument('--testing_file' , type=_SCREAMING_SNAKE_CASE , help='Configuration file on which to run.' )
add_new_model_parser.add_argument(
'--path' , type=_SCREAMING_SNAKE_CASE , help='Path to cookiecutter. Should only be used for testing purposes.' )
add_new_model_parser.set_defaults(func=_SCREAMING_SNAKE_CASE )
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , *SCREAMING_SNAKE_CASE ) -> int:
__lowerCAmelCase : Union[str, Any] = testing
__lowerCAmelCase : Optional[Any] = testing_file
__lowerCAmelCase : Tuple = path
def snake_case ( self ) -> str:
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.' )
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
__lowerCAmelCase : List[str] = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
if len(_SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(
'Several directories starting with `cookiecutter-template-` in current working directory. '
'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
'change your working directory.' )
__lowerCAmelCase : Union[str, Any] = (
Path(_SCREAMING_SNAKE_CASE ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
__lowerCAmelCase : Union[str, Any] = path_to_transformer_root / 'templates' / 'adding_a_new_model'
# Execute cookiecutter
if not self._testing:
cookiecutter(str(_SCREAMING_SNAKE_CASE ) )
else:
with open(self._testing_file , 'r' ) as configuration_file:
__lowerCAmelCase : Dict = json.load(_SCREAMING_SNAKE_CASE )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=_SCREAMING_SNAKE_CASE , extra_context=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Tuple = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]
# Retrieve configuration
with open(directory + '/configuration.json' , 'r' ) as configuration_file:
__lowerCAmelCase : Any = json.load(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = configuration['lowercase_modelname']
__lowerCAmelCase : str = configuration['generate_tensorflow_pytorch_and_flax']
os.remove(F"""{directory}/configuration.json""" )
__lowerCAmelCase : Optional[int] = 'PyTorch' in generate_tensorflow_pytorch_and_flax
__lowerCAmelCase : List[Any] = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
__lowerCAmelCase : int = 'Flax' in generate_tensorflow_pytorch_and_flax
__lowerCAmelCase : List[str] = F"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"""
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
os.makedirs(F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""" , exist_ok=_SCREAMING_SNAKE_CASE )
# Tests require submodules as they have parent imports
with open(F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , 'w' ):
pass
shutil.move(
F"""{directory}/__init__.py""" , F"""{model_dir}/__init__.py""" , )
shutil.move(
F"""{directory}/configuration_{lowercase_model_name}.py""" , F"""{model_dir}/configuration_{lowercase_model_name}.py""" , )
def remove_copy_lines(SCREAMING_SNAKE_CASE ):
with open(_SCREAMING_SNAKE_CASE , 'r' ) as f:
__lowerCAmelCase : Optional[int] = f.readlines()
with open(_SCREAMING_SNAKE_CASE , 'w' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(_SCREAMING_SNAKE_CASE )
if output_pytorch:
if not self._testing:
remove_copy_lines(F"""{directory}/modeling_{lowercase_model_name}.py""" )
shutil.move(
F"""{directory}/modeling_{lowercase_model_name}.py""" , F"""{model_dir}/modeling_{lowercase_model_name}.py""" , )
shutil.move(
F"""{directory}/test_modeling_{lowercase_model_name}.py""" , F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , )
else:
os.remove(F"""{directory}/modeling_{lowercase_model_name}.py""" )
os.remove(F"""{directory}/test_modeling_{lowercase_model_name}.py""" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
shutil.move(
F"""{directory}/modeling_tf_{lowercase_model_name}.py""" , F"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , )
shutil.move(
F"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , )
else:
os.remove(F"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
os.remove(F"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" )
if output_flax:
if not self._testing:
remove_copy_lines(F"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
F"""{directory}/modeling_flax_{lowercase_model_name}.py""" , F"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , )
shutil.move(
F"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , )
else:
os.remove(F"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
os.remove(F"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
F"""{directory}/{lowercase_model_name}.md""" , F"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , )
shutil.move(
F"""{directory}/tokenization_{lowercase_model_name}.py""" , F"""{model_dir}/tokenization_{lowercase_model_name}.py""" , )
shutil.move(
F"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , F"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
# Create temp file
__lowerCAmelCase , __lowerCAmelCase : Dict = mkstemp()
__lowerCAmelCase : Union[str, Any] = False
with fdopen(_SCREAMING_SNAKE_CASE , 'w' ) as new_file:
with open(_SCREAMING_SNAKE_CASE ) as old_file:
for line in old_file:
new_file.write(_SCREAMING_SNAKE_CASE )
if line_to_copy_below in line:
__lowerCAmelCase : Any = True
for line_to_copy in lines_to_copy:
new_file.write(_SCREAMING_SNAKE_CASE )
if not line_found:
raise ValueError(F"""Line {line_to_copy_below} was not found in file.""" )
# Copy the file permissions from the old file to the new file
copymode(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Remove original file
remove(_SCREAMING_SNAKE_CASE )
# Move new file
move(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def skip_units(SCREAMING_SNAKE_CASE ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(SCREAMING_SNAKE_CASE ):
with open(_SCREAMING_SNAKE_CASE ) as datafile:
__lowerCAmelCase : Optional[Any] = []
__lowerCAmelCase : List[Any] = False
__lowerCAmelCase : List[str] = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
__lowerCAmelCase : Optional[Any] = line.split('\"' )[1]
__lowerCAmelCase : Union[str, Any] = skip_units(_SCREAMING_SNAKE_CASE )
elif "# Below: " in line and "##" not in line:
__lowerCAmelCase : List[Any] = line.split('\"' )[1]
__lowerCAmelCase : str = skip_units(_SCREAMING_SNAKE_CASE )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = []
elif "# Replace with" in line and "##" not in line:
__lowerCAmelCase : Tuple = []
elif "##" not in line:
lines_to_copy.append(_SCREAMING_SNAKE_CASE )
remove(_SCREAMING_SNAKE_CASE )
replace_in_files(F"""{directory}/to_replace_{lowercase_model_name}.py""" )
os.rmdir(_SCREAMING_SNAKE_CASE )
| 709 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCamelCase__ ( a ):
'''simple docstring'''
@staticmethod
@abstractmethod
def snake_case ( SCREAMING_SNAKE_CASE ) -> Optional[Any]:
raise NotImplementedError()
@abstractmethod
def snake_case ( self ) -> Optional[Any]:
raise NotImplementedError()
| 123 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''dandelin/vilt-b32-finetuned-vqa'''
UpperCamelCase = (
'''This is a tool that answers a question about an image. It takes an input named `image` which should be the '''
'''image containing the information, as well as a `question` which should be the question in English. It '''
'''returns a text that is the answer to the question.'''
)
UpperCamelCase = '''image_qa'''
UpperCamelCase = AutoProcessor
UpperCamelCase = AutoModelForVisualQuestionAnswering
UpperCamelCase = ['''image''', '''text''']
UpperCamelCase = ['''text''']
def __init__( self : Dict , *_UpperCAmelCase : Dict , **_UpperCAmelCase : Any ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["vision"] )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : int , _UpperCAmelCase : "Image" , _UpperCAmelCase : str ) -> Optional[int]:
'''simple docstring'''
return self.pre_processor(_UpperCAmelCase , _UpperCAmelCase , return_tensors="pt" )
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : Any ) -> Tuple:
'''simple docstring'''
with torch.no_grad():
return self.model(**_UpperCAmelCase ).logits
def lowercase__ ( self : str , _UpperCAmelCase : str ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
| 82 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
__magic_name__ = datasets.utils.logging.get_logger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE_ ( datasets.BuilderConfig ):
"""simple docstring"""
__lowercase : int = 10000
__lowercase : Optional[List[str]] = None
__lowercase : Optional[datasets.Features] = None
class SCREAMING_SNAKE_CASE_ ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
__lowercase : Any = ParquetConfig
def snake_case_ ( self):
return datasets.DatasetInfo(features=self.config.features)
def snake_case_ ( self , lowerCAmelCase__):
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
__SCREAMING_SNAKE_CASE = dl_manager.download_and_extract(self.config.data_files)
if isinstance(lowerCAmelCase__ , (str, list, tuple)):
__SCREAMING_SNAKE_CASE = data_files
if isinstance(lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__SCREAMING_SNAKE_CASE = [dl_manager.iter_files(lowerCAmelCase__) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files})]
__SCREAMING_SNAKE_CASE = []
for split_name, files in data_files.items():
if isinstance(lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__SCREAMING_SNAKE_CASE = [dl_manager.iter_files(lowerCAmelCase__) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(lowerCAmelCase__):
with open(lowerCAmelCase__ , """rb""") as f:
__SCREAMING_SNAKE_CASE = datasets.Features.from_arrow_schema(pq.read_schema(lowerCAmelCase__))
break
splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={"""files""": files}))
return splits
def snake_case_ ( self , lowerCAmelCase__):
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
__SCREAMING_SNAKE_CASE = table_cast(lowerCAmelCase__ , self.info.features.arrow_schema)
return pa_table
def snake_case_ ( self , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema) != sorted(self.config.columns):
raise ValueError(
f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'")
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__)):
with open(lowerCAmelCase__ , """rb""") as f:
__SCREAMING_SNAKE_CASE = pq.ParquetFile(lowerCAmelCase__)
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns)):
__SCREAMING_SNAKE_CASE = pa.Table.from_batches([record_batch])
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"{file_idx}_{batch_idx}", self._cast_table(lowerCAmelCase__)
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(lowerCAmelCase__)}: {e}")
raise
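# Hedged usage sketch (not in the original file): this builder backs the
# packaged "parquet" loader in `datasets`; a typical call looks like the
# following (the file path is hypothetical).
# from datasets import load_dataset
# ds = load_dataset('parquet', data_files={'train': 'data/train.parquet'})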
| 155 | 0 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
UpperCAmelCase_ = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
UpperCAmelCase_ = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
UpperCAmelCase_ = [file for file in filepaths if ' ' in file]
if space_files:
print(f"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
UpperCAmelCase_ = [file for file in filepaths if '-' in file]
if hyphen_files:
print(f"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
UpperCAmelCase_ = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
UpperCAmelCase_ = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 80 |
from __future__ import annotations
def lowerCamelCase__ ( A__ : list ):
'''simple docstring'''
if not nums:
raise ValueError("""List is empty""" )
return sum(A__ ) / len(A__ )
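# Worked example (illustrative, not in the original): the arithmetic mean of
# [1, 2, 3] is (1 + 2 + 3) / 3 == 2.0, while an empty list raises ValueError
# via the guard above.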
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80 | 1 |
"""simple docstring"""
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Any:
'''simple docstring'''
return F'gaussian_noise_s={seed}_shape={"_".join([str(UpperCamelCase__ ) for s in shape] )}.npy'
def lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
def lowerCamelCase ( self , UpperCamelCase__=0 , UpperCamelCase__=(4, 4, 64, 64) , UpperCamelCase__=False ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Tuple = jnp.bfloataa if fpaa else jnp.floataa
snake_case : Union[str, Any] = jnp.array(load_hf_numpy(self.get_file_format(UpperCamelCase__ , UpperCamelCase__ ) ) , dtype=UpperCamelCase__ )
return image
def lowerCamelCase ( self , UpperCamelCase__=False , UpperCamelCase__="CompVis/stable-diffusion-v1-4" ) -> Optional[int]:
'''simple docstring'''
snake_case : str = jnp.bfloataa if fpaa else jnp.floataa
snake_case : Tuple = 'bf16' if fpaa else None
snake_case : str = FlaxUNetaDConditionModel.from_pretrained(
UpperCamelCase__ , subfolder="unet" , dtype=UpperCamelCase__ , revision=UpperCamelCase__ )
return model, params
def lowerCamelCase ( self , UpperCamelCase__=0 , UpperCamelCase__=(4, 77, 768) , UpperCamelCase__=False ) -> str:
'''simple docstring'''
snake_case : Tuple = jnp.bfloataa if fpaa else jnp.floataa
snake_case : Optional[Any] = jnp.array(load_hf_numpy(self.get_file_format(UpperCamelCase__ , UpperCamelCase__ ) ) , dtype=UpperCamelCase__ )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Any:
'''simple docstring'''
snake_case : Any = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4" , fpaa=UpperCamelCase__ )
snake_case : Any = self.get_latents(UpperCamelCase__ , fpaa=UpperCamelCase__ )
snake_case : Tuple = self.get_encoder_hidden_states(UpperCamelCase__ , fpaa=UpperCamelCase__ )
snake_case : int = model.apply(
{"params": params} , UpperCamelCase__ , jnp.array(UpperCamelCase__ , dtype=jnp.intaa ) , encoder_hidden_states=UpperCamelCase__ , ).sample
assert sample.shape == latents.shape
snake_case : List[Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
snake_case : List[str] = jnp.array(UpperCamelCase__ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
snake_case : Any = self.get_unet_model(model_id="stabilityai/stable-diffusion-2" , fpaa=UpperCamelCase__ )
snake_case : Any = self.get_latents(UpperCamelCase__ , shape=(4, 4, 96, 96) , fpaa=UpperCamelCase__ )
snake_case : List[Any] = self.get_encoder_hidden_states(UpperCamelCase__ , shape=(4, 77, 1024) , fpaa=UpperCamelCase__ )
snake_case : Optional[Any] = model.apply(
{"params": params} , UpperCamelCase__ , jnp.array(UpperCamelCase__ , dtype=jnp.intaa ) , encoder_hidden_states=UpperCamelCase__ , ).sample
assert sample.shape == latents.shape
snake_case : List[Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
snake_case : int = jnp.array(UpperCamelCase__ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-2 )
| 178 |
_lowercase : Any ={"""a""": ["""c""", """b"""], """b""": ["""d""", """e"""], """c""": [], """d""": [], """e""": []}
_lowercase : Union[str, Any] =["""a""", """b""", """c""", """d""", """e"""]
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ : int = start
# add current to visited
visited.append(lowerCAmelCase__ )
lowerCamelCase_ : Optional[int] = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
lowerCamelCase_ : Dict = topological_sort(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
# if all neighbors visited add current to sort
sort.append(lowerCAmelCase__ )
# if not all vertices have been visited, select a new one to visit
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
for vertice in vertices:
if vertice not in visited:
lowerCamelCase_ : Any = topological_sort(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
# return sort
return sort
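# Illustrative trace (not in the original): for the sample graph above,
# topological_sort('a', [], []) returns ['c', 'd', 'e', 'b', 'a'] -- every
# vertex appears only after all vertices it points to.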
if __name__ == "__main__":
_lowercase : str =topological_sort("""a""", [], [])
print(sort)
| 364 | 0 |
"""simple docstring"""
from math import isqrt
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , max_number , i ):
lowerCAmelCase__ = False
return [i for i in range(2 , max_number ) if is_prime[i]]
def _UpperCAmelCase ( lowerCamelCase__ = 10**8 ):
"""simple docstring"""
lowerCAmelCase__ = calculate_prime_numbers(max_number // 2 )
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
lowerCAmelCase__ = len(prime_numbers ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
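# Worked mini-example (illustrative, not in the original): with
# max_number = 30 the primes below 30 // 2 = 15 are [2, 3, 5, 7, 11, 13];
# the two-pointer sweep counts the 10 semiprimes below 30:
# 4, 6, 9, 10, 14, 15, 21, 22, 25, 26.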
if __name__ == "__main__":
print(F"{solution() = }")
| 718 | """simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
lowerCAmelCase__ = 0
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
lowerCAmelCase__ = Path(snake_case__ ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
lowerCAmelCase__ = Path(snake_case__ ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = CLIPConfig()
# Create a dummy config file with image_processor_type
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
lowerCAmelCase__ = Path(snake_case__ ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ).to_dict()
config_dict.pop("""image_processor_type""" )
lowerCAmelCase__ = CLIPImageProcessor(**snake_case__ )
# save in new folder
model_config.save_pretrained(snake_case__ )
config.save_pretrained(snake_case__ )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
# make sure private variable is not incorrectly saved
lowerCAmelCase__ = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
with self.assertRaisesRegex(
snake_case__ , """clip-base is not a local folder and is not a valid model identifier""" ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""clip-base""" )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
with self.assertRaisesRegex(
snake_case__ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ , revision="""aaaaaa""" )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
with self.assertRaisesRegex(
snake_case__ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def _SCREAMING_SNAKE_CASE ( self : Any ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(snake_case__ ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(snake_case__ ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(snake_case__ )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ , trust_remote_code=snake_case__ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
try:
AutoConfig.register("""custom""" , snake_case__ )
AutoImageProcessor.register(snake_case__ , snake_case__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case__ ):
AutoImageProcessor.register(snake_case__ , snake_case__ )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
lowerCAmelCase__ = Path(snake_case__ ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) )
lowerCAmelCase__ = CustomImageProcessor.from_pretrained(snake_case__ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(snake_case__ )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
class a_ ( __UpperCamelCase ):
UpperCamelCase_ : Tuple = True
try:
AutoConfig.register("""custom""" , snake_case__ )
AutoImageProcessor.register(snake_case__ , snake_case__ )
# If remote code is not set, the default is to use local
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(not hasattr(snake_case__ , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 674 | 0 |
'''simple docstring'''
def snake_case_ ( SCREAMING_SNAKE_CASE__ = 10_00 ):
'''simple docstring'''
_snake_case , _snake_case = 1, 1
_snake_case = []
for i in range(1 , n + 1 ):
_snake_case = prev_numerator + 2 * prev_denominator
_snake_case = prev_numerator + prev_denominator
if len(str(numerator ) ) > len(str(denominator ) ):
result.append(i )
_snake_case = numerator
_snake_case = denominator
return len(__snake_case )
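# Worked example (illustrative, not in the original): the recurrence above
# (numerator' = numerator + 2 * denominator, denominator' = numerator +
# denominator, starting from 1/1) yields 3/2, 7/5, 17/12, 41/29, ...; the
# first expansion whose numerator has more digits than its denominator is
# the 8th, 1393/985.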
if __name__ == "__main__":
print(F'{solution() = }')
| 672 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class _snake_case :
'''simple docstring'''
def __init__( self : Dict , snake_case : int , snake_case : MutableSequence[float] ):
if len(snake_case ) != degree + 1:
raise ValueError(
'''The number of coefficients should be equal to the degree + 1.''' )
UpperCAmelCase_ :list[float] = list(snake_case )
UpperCAmelCase_ :str = degree
def __add__( self : Any , snake_case : Polynomial ):
if self.degree > polynomial_a.degree:
UpperCAmelCase_ :int = self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree , snake_case )
else:
UpperCAmelCase_ :Any = polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree , snake_case )
def __sub__( self : List[str] , snake_case : Polynomial ):
return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self : Union[str, Any] ):
return Polynomial(self.degree , [-c for c in self.coefficients] )
def __mul__( self : Any , snake_case : Polynomial ):
UpperCAmelCase_ :list[float] = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree , snake_case )
def snake_case_ ( self : Optional[Any] , snake_case : int | float ):
UpperCAmelCase_ :int | float = 0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self : List[Any] ):
UpperCAmelCase_ :List[str] = ''''''
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(snake_case )
return polynomial
def __repr__( self : int ):
return self.__str__()
def snake_case_ ( self : str ):
UpperCAmelCase_ :list[float] = [0] * self.degree
for i in range(self.degree ):
UpperCAmelCase_ :str = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 , snake_case )
def snake_case_ ( self : Optional[int] , snake_case : int | float = 0 ):
UpperCAmelCase_ :list[float] = [0] * (self.degree + 2)
UpperCAmelCase_ :List[str] = constant
for i in range(self.degree + 1 ):
UpperCAmelCase_ :Optional[int] = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 , snake_case )
def __eq__( self : int , snake_case : object ):
if not isinstance(snake_case , snake_case ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self : Optional[Any] , snake_case : object ):
return not self.__eq__(snake_case )
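# Hedged usage sketch (not in the original file): coefficients are stored
# lowest-degree first, so objects built with the class above behave as below.
# The method names `evaluate`/`derivative` follow the upstream
# implementation; in this file they appear under placeholder names.
# p = Polynomial(2, [1, 0, 3])               # 1 + 3x^2
# q = Polynomial(1, [0, 2])                  # 2x
# (p + q).evaluate(2) == 17                  # (1 + 3*4) + 2*2
# p.derivative() == Polynomial(1, [0, 6])    # d/dx (1 + 3x^2) = 6x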
| 608 | 0 |
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__lowerCamelCase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
def _snake_case ( lowerCAmelCase : Union[List, PIL.Image.Image, torch.Tensor] ):
"""simple docstring"""
warnings.warn(
"The preprocess method is deprecated and will be removed in a future version. Please"
" use VaeImageProcessor.preprocess instead" , lowerCAmelCase , )
if isinstance(lowerCAmelCase , torch.Tensor ):
return image
elif isinstance(lowerCAmelCase , PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ : Tuple = [image]
if isinstance(image[0] , PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = image[0].size
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
SCREAMING_SNAKE_CASE_ : Optional[Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image]
SCREAMING_SNAKE_CASE_ : Any = np.concatenate(lowerCAmelCase , axis=0 )
SCREAMING_SNAKE_CASE_ : Optional[int] = np.array(lowerCAmelCase ).astype(np.floataa ) / 255.0
SCREAMING_SNAKE_CASE_ : List[str] = image.transpose(0 , 3 , 1 , 2 )
SCREAMING_SNAKE_CASE_ : Any = 2.0 * image - 1.0
SCREAMING_SNAKE_CASE_ : str = torch.from_numpy(lowerCAmelCase )
elif isinstance(image[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE_ : int = torch.cat(lowerCAmelCase , dim=0 )
return image
def _snake_case ( lowerCAmelCase : Union[List, PIL.Image.Image, torch.Tensor] ):
"""simple docstring"""
if isinstance(lowerCAmelCase , torch.Tensor ):
return mask
elif isinstance(lowerCAmelCase , PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ : List[str] = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = mask[0].size
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = (x - x % 3_2 for x in (w, h)) # resize to integer multiple of 32
SCREAMING_SNAKE_CASE_ : str = [np.array(m.convert("L" ).resize((w, h) , resample=PIL_INTERPOLATION["nearest"] ) )[None, :] for m in mask]
SCREAMING_SNAKE_CASE_ : List[Any] = np.concatenate(lowerCAmelCase , axis=0 )
SCREAMING_SNAKE_CASE_ : Tuple = mask.astype(np.floataa ) / 255.0
SCREAMING_SNAKE_CASE_ : Dict = 0
SCREAMING_SNAKE_CASE_ : Optional[Any] = 1
SCREAMING_SNAKE_CASE_ : Tuple = torch.from_numpy(lowerCAmelCase )
elif isinstance(mask[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE_ : Dict = torch.cat(lowerCAmelCase , dim=0 )
return mask
class a__ ( A__ ):
A = 42
A = 42
def __init__( self : List[str],_A : Any,_A : Any ):
"""simple docstring"""
super().__init__()
self.register_modules(unet=_A,scheduler=_A )
@torch.no_grad()
def __call__( self : str,_A : Union[torch.Tensor, PIL.Image.Image],_A : Union[torch.Tensor, PIL.Image.Image],_A : int = 250,_A : float = 0.0,_A : int = 10,_A : int = 10,_A : Optional[Union[torch.Generator, List[torch.Generator]]] = None,_A : Optional[str] = "pil",_A : bool = True,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = image
SCREAMING_SNAKE_CASE_ : int = _preprocess_image(_A )
SCREAMING_SNAKE_CASE_ : Tuple = original_image.to(device=self.device,dtype=self.unet.dtype )
SCREAMING_SNAKE_CASE_ : Any = _preprocess_mask(_A )
SCREAMING_SNAKE_CASE_ : Any = mask_image.to(device=self.device,dtype=self.unet.dtype )
SCREAMING_SNAKE_CASE_ : Dict = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(_A,_A ) and len(_A ) != batch_size:
raise ValueError(
F'You have passed a list of generators of length {len(_A )}, but requested an effective batch'
F' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
SCREAMING_SNAKE_CASE_ : List[str] = original_image.shape
SCREAMING_SNAKE_CASE_ : List[str] = randn_tensor(_A,generator=_A,device=self.device,dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(_A,_A,_A,self.device )
SCREAMING_SNAKE_CASE_ : Any = eta
SCREAMING_SNAKE_CASE_ : List[Any] = self.scheduler.timesteps[0] + 1
SCREAMING_SNAKE_CASE_ : str = generator[0] if isinstance(_A,_A ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
SCREAMING_SNAKE_CASE_ : Dict = self.unet(_A,_A ).sample
# compute previous image: x_t -> x_t-1
SCREAMING_SNAKE_CASE_ : Tuple = self.scheduler.step(_A,_A,_A,_A,_A,_A ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
SCREAMING_SNAKE_CASE_ : Tuple = self.scheduler.undo_step(_A,_A,_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = t
SCREAMING_SNAKE_CASE_ : List[Any] = (image / 2 + 0.5).clamp(0,1 )
SCREAMING_SNAKE_CASE_ : Optional[int] = image.cpu().permute(0,2,3,1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE_ : Optional[int] = self.numpy_to_pil(_A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_A )
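# Hedged usage sketch (not part of the original file): a typical RePaint
# inpainting call matching the __call__ signature above; the checkpoint id
# and the `original_image`/`mask` variables are illustrative.
# from diffusers import RePaintPipeline, RePaintScheduler
# scheduler = RePaintScheduler.from_pretrained('google/ddpm-ema-celebahq-256')
# pipe = RePaintPipeline.from_pretrained('google/ddpm-ema-celebahq-256', scheduler=scheduler)
# out = pipe(image=original_image, mask_image=mask, num_inference_steps=250,
#            eta=0.0, jump_length=10, jump_n_samples=10)
# inpainted = out.images[0]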
| 708 | from pathlib import Path
import fire
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = Path(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = Path(lowerCAmelCase )
dest_dir.mkdir(exist_ok=lowerCAmelCase )
for path in src_dir.iterdir():
SCREAMING_SNAKE_CASE_ : Any = [x.rstrip() for x in list(path.open().readlines() )][:n]
SCREAMING_SNAKE_CASE_ : int = dest_dir.joinpath(path.name )
print(lowerCAmelCase )
dest_path.open("w" ).write("\n".join(lowerCAmelCase ) )
if __name__ == "__main__":
    fire.Fire(minify)
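# Example invocation (illustrative; assumes this script is saved as minify.py):
#
#     python minify.py path/to/src_dir path/to/dest_dir 5
#
# This writes the first 5 lines of every file in src_dir into dest_dir.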
'''simple docstring'''
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()
@require_tf
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Optional[int] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" )
a_ : Any = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=6 ) , [
{"""sequence""": """My name is grouped""", """score""": 2.1E-0_5, """token""": 3_80_15, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1E-0_5, """token""": 2_55_06, """token_str""": """ accuser"""},
] , )
a_ : int = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=6 ) , [
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1E-0_5,
"""token""": 3_80_15,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1E-0_5,
"""token""": 2_55_06,
"""token_str""": """ accuser""",
},
] , )
a_ : Tuple = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=6 ) , [
{"""sequence""": """My name is Clara""", """score""": 2E-0_5, """token""": 1_36_06, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2E-0_5, """token""": 34_99, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9E-0_5, """token""": 29_41, """token_str""": """ Te"""},
] , )
@require_torch
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : List[Any] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" )
a_ : Any = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=6 ) , [
{"""sequence""": """My name is Maul""", """score""": 2.2E-0_5, """token""": 3_56_76, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2E-0_5, """token""": 1_64_16, """token_str""": """ELS"""},
] , )
a_ : Tuple = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=6 ) , [
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2E-0_5,
"""token""": 3_56_76,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2E-0_5, """token""": 1_64_16, """token_str""": """ELS"""},
] , )
a_ : Tuple = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=6 ) , [
{"""sequence""": """My name is Patrick""", """score""": 2.1E-0_5, """token""": 34_99, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2E-0_5, """token""": 29_41, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2E-0_5, """token""": 1_36_06, """token_str""": """ Clara"""},
] , )
a_ : Any = unmasker("""My name is <mask> <mask>""" , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=6 ) , [
[
{
"""score""": 2.2E-0_5,
"""token""": 3_56_76,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2E-0_5, """token""": 1_64_16, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2E-0_5,
"""token""": 3_56_76,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2E-0_5, """token""": 1_64_16, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] , )
@require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)
@slow
@require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)
@slow
@require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )
        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )
        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )
@require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
@require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
a_ : Optional[int] = fill_masker.tokenizer
a_ : Tuple = fill_masker.model
a_ : Union[str, Any] = fill_masker(
f'''This is a {tokenizer.mask_token}''' , )
self.assertEqual(
lowerCAmelCase_ , [
{"""sequence""": ANY(lowerCAmelCase_ ), """score""": ANY(lowerCAmelCase_ ), """token""": ANY(lowerCAmelCase_ ), """token_str""": ANY(lowerCAmelCase_ )},
{"""sequence""": ANY(lowerCAmelCase_ ), """score""": ANY(lowerCAmelCase_ ), """token""": ANY(lowerCAmelCase_ ), """token_str""": ANY(lowerCAmelCase_ )},
{"""sequence""": ANY(lowerCAmelCase_ ), """score""": ANY(lowerCAmelCase_ ), """token""": ANY(lowerCAmelCase_ ), """token_str""": ANY(lowerCAmelCase_ )},
{"""sequence""": ANY(lowerCAmelCase_ ), """score""": ANY(lowerCAmelCase_ ), """token""": ANY(lowerCAmelCase_ ), """token_str""": ANY(lowerCAmelCase_ )},
{"""sequence""": ANY(lowerCAmelCase_ ), """score""": ANY(lowerCAmelCase_ ), """token""": ANY(lowerCAmelCase_ ), """token_str""": ANY(lowerCAmelCase_ )},
] , )
a_ : Union[str, Any] = fill_masker([f'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
lowerCAmelCase_ , [
{"""sequence""": ANY(lowerCAmelCase_ ), """score""": ANY(lowerCAmelCase_ ), """token""": ANY(lowerCAmelCase_ ), """token_str""": ANY(lowerCAmelCase_ )},
{"""sequence""": ANY(lowerCAmelCase_ ), """score""": ANY(lowerCAmelCase_ ), """token""": ANY(lowerCAmelCase_ ), """token_str""": ANY(lowerCAmelCase_ )},
{"""sequence""": ANY(lowerCAmelCase_ ), """score""": ANY(lowerCAmelCase_ ), """token""": ANY(lowerCAmelCase_ ), """token_str""": ANY(lowerCAmelCase_ )},
{"""sequence""": ANY(lowerCAmelCase_ ), """score""": ANY(lowerCAmelCase_ ), """token""": ANY(lowerCAmelCase_ ), """token_str""": ANY(lowerCAmelCase_ )},
{"""sequence""": ANY(lowerCAmelCase_ ), """score""": ANY(lowerCAmelCase_ ), """token""": ANY(lowerCAmelCase_ ), """token_str""": ANY(lowerCAmelCase_ )},
] , )
a_ : Optional[Any] = fill_masker([f'''This is a {tokenizer.mask_token}''', f'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
lowerCAmelCase_ , [
[
{"""sequence""": ANY(lowerCAmelCase_ ), """score""": ANY(lowerCAmelCase_ ), """token""": ANY(lowerCAmelCase_ ), """token_str""": ANY(lowerCAmelCase_ )},
{"""sequence""": ANY(lowerCAmelCase_ ), """score""": ANY(lowerCAmelCase_ ), """token""": ANY(lowerCAmelCase_ ), """token_str""": ANY(lowerCAmelCase_ )},
{"""sequence""": ANY(lowerCAmelCase_ ), """score""": ANY(lowerCAmelCase_ ), """token""": ANY(lowerCAmelCase_ ), """token_str""": ANY(lowerCAmelCase_ )},
{"""sequence""": ANY(lowerCAmelCase_ ), """score""": ANY(lowerCAmelCase_ ), """token""": ANY(lowerCAmelCase_ ), """token_str""": ANY(lowerCAmelCase_ )},
{"""sequence""": ANY(lowerCAmelCase_ ), """score""": ANY(lowerCAmelCase_ ), """token""": ANY(lowerCAmelCase_ ), """token_str""": ANY(lowerCAmelCase_ )},
],
[
{"""sequence""": ANY(lowerCAmelCase_ ), """score""": ANY(lowerCAmelCase_ ), """token""": ANY(lowerCAmelCase_ ), """token_str""": ANY(lowerCAmelCase_ )},
{"""sequence""": ANY(lowerCAmelCase_ ), """score""": ANY(lowerCAmelCase_ ), """token""": ANY(lowerCAmelCase_ ), """token_str""": ANY(lowerCAmelCase_ )},
{"""sequence""": ANY(lowerCAmelCase_ ), """score""": ANY(lowerCAmelCase_ ), """token""": ANY(lowerCAmelCase_ ), """token_str""": ANY(lowerCAmelCase_ )},
{"""sequence""": ANY(lowerCAmelCase_ ), """score""": ANY(lowerCAmelCase_ ), """token""": ANY(lowerCAmelCase_ ), """token_str""": ANY(lowerCAmelCase_ )},
{"""sequence""": ANY(lowerCAmelCase_ ), """score""": ANY(lowerCAmelCase_ ), """token""": ANY(lowerCAmelCase_ ), """token_str""": ANY(lowerCAmelCase_ )},
],
] , )
with self.assertRaises(lowerCAmelCase_ ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(lowerCAmelCase_ ):
fill_masker("""This is""" )
self.run_test_top_k(lowerCAmelCase_ , lowerCAmelCase_ )
self.run_test_targets(lowerCAmelCase_ , lowerCAmelCase_ )
self.run_test_top_k_targets(lowerCAmelCase_ , lowerCAmelCase_ )
self.fill_mask_with_duplicate_targets_and_top_k(lowerCAmelCase_ , lowerCAmelCase_ )
self.fill_mask_with_multiple_masks(lowerCAmelCase_ , lowerCAmelCase_ )
def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
a_ : List[Any] = tokenizer.get_vocab()
a_ : List[Any] = sorted(vocab.keys() )[:2]
# Pipeline argument
a_ : Union[str, Any] = FillMaskPipeline(model=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , targets=lowerCAmelCase_ )
a_ : Tuple = fill_masker(f'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
lowerCAmelCase_ , [
{"""sequence""": ANY(lowerCAmelCase_ ), """score""": ANY(lowerCAmelCase_ ), """token""": ANY(lowerCAmelCase_ ), """token_str""": ANY(lowerCAmelCase_ )},
{"""sequence""": ANY(lowerCAmelCase_ ), """score""": ANY(lowerCAmelCase_ ), """token""": ANY(lowerCAmelCase_ ), """token_str""": ANY(lowerCAmelCase_ )},
] , )
a_ : Tuple = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , lowerCAmelCase_ )
a_ : Tuple = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(lowerCAmelCase_ ) )
# Call argument
a_ : Any = FillMaskPipeline(model=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ )
a_ : List[str] = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=lowerCAmelCase_ )
self.assertEqual(
lowerCAmelCase_ , [
{"""sequence""": ANY(lowerCAmelCase_ ), """score""": ANY(lowerCAmelCase_ ), """token""": ANY(lowerCAmelCase_ ), """token_str""": ANY(lowerCAmelCase_ )},
{"""sequence""": ANY(lowerCAmelCase_ ), """score""": ANY(lowerCAmelCase_ ), """token""": ANY(lowerCAmelCase_ ), """token_str""": ANY(lowerCAmelCase_ )},
] , )
a_ : Optional[Any] = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , lowerCAmelCase_ )
a_ : Any = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(lowerCAmelCase_ ) )
# Score equivalence
a_ : List[str] = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=lowerCAmelCase_ )
a_ : Tuple = [top_mask["""token_str"""] for top_mask in outputs]
a_ : str = [top_mask["""score"""] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowerCAmelCase_ ) == set(lowerCAmelCase_ ):
a_ : Any = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=lowerCAmelCase_ )
a_ : int = [top_mask["""score"""] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(lowerCAmelCase_ ) , nested_simplify(lowerCAmelCase_ ) )
# Raises with invalid
with self.assertRaises(lowerCAmelCase_ ):
a_ : Any = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(lowerCAmelCase_ ):
a_ : Union[str, Any] = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[""""""] )
with self.assertRaises(lowerCAmelCase_ ):
a_ : str = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets="""""" )
def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
a_ : Dict = FillMaskPipeline(model=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , top_k=2 )
a_ : Dict = fill_masker(f'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
lowerCAmelCase_ , [
{"""sequence""": ANY(lowerCAmelCase_ ), """score""": ANY(lowerCAmelCase_ ), """token""": ANY(lowerCAmelCase_ ), """token_str""": ANY(lowerCAmelCase_ )},
{"""sequence""": ANY(lowerCAmelCase_ ), """score""": ANY(lowerCAmelCase_ ), """token""": ANY(lowerCAmelCase_ ), """token_str""": ANY(lowerCAmelCase_ )},
] , )
a_ : str = FillMaskPipeline(model=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ )
a_ : Tuple = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
lowerCAmelCase_ , [
{"""sequence""": ANY(lowerCAmelCase_ ), """score""": ANY(lowerCAmelCase_ ), """token""": ANY(lowerCAmelCase_ ), """token_str""": ANY(lowerCAmelCase_ )},
{"""sequence""": ANY(lowerCAmelCase_ ), """score""": ANY(lowerCAmelCase_ ), """token""": ANY(lowerCAmelCase_ ), """token_str""": ANY(lowerCAmelCase_ )},
] , )
self.assertEqual(nested_simplify(lowerCAmelCase_ ) , nested_simplify(lowerCAmelCase_ ) )
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        targets_order = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets_order).issubset(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets_order)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(unmasked_targets))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)
        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)
def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
a_ : Optional[Any] = FillMaskPipeline(model=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ )
a_ : Dict = fill_masker(
f'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
lowerCAmelCase_ , [
[
{"""sequence""": ANY(lowerCAmelCase_ ), """score""": ANY(lowerCAmelCase_ ), """token""": ANY(lowerCAmelCase_ ), """token_str""": ANY(lowerCAmelCase_ )},
{"""sequence""": ANY(lowerCAmelCase_ ), """score""": ANY(lowerCAmelCase_ ), """token""": ANY(lowerCAmelCase_ ), """token_str""": ANY(lowerCAmelCase_ )},
],
[
{"""sequence""": ANY(lowerCAmelCase_ ), """score""": ANY(lowerCAmelCase_ ), """token""": ANY(lowerCAmelCase_ ), """token_str""": ANY(lowerCAmelCase_ )},
{"""sequence""": ANY(lowerCAmelCase_ ), """score""": ANY(lowerCAmelCase_ ), """token""": ANY(lowerCAmelCase_ ), """token_str""": ANY(lowerCAmelCase_ )},
],
[
{"""sequence""": ANY(lowerCAmelCase_ ), """score""": ANY(lowerCAmelCase_ ), """token""": ANY(lowerCAmelCase_ ), """token_str""": ANY(lowerCAmelCase_ )},
{"""sequence""": ANY(lowerCAmelCase_ ), """score""": ANY(lowerCAmelCase_ ), """token""": ANY(lowerCAmelCase_ ), """token_str""": ANY(lowerCAmelCase_ )},
],
] , )
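# Minimal end-to-end usage of the pipeline exercised above (illustrative only;
# downloads the same tiny checkpoint the fast tests use):
#
#     unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base")
#     print(unmasker("Paris is the <mask> of France.", top_k=2))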
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]
    return out_tensor.tolist()
def is_punctuation(char: str) -> bool:
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
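# A few concrete cases (illustrative; the first range above covers ASCII 33-47,
# so "!" with code point 33 qualifies):
#
#     is_punctuation("!")       # True, via the ASCII range check
#     is_punctuation("a")       # False
#     is_punctuation("\u00bf")  # True, Unicode category "Po"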
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"
    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
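# Illustrative construction of the collator (not in the original file; the
# tokenizer id is an assumption, and `features` must carry LUKE-style keys such
# as "entity_ids", "labels", "ner_tags" and "original_entity_spans"):
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-base")
#     collator = DataCollatorForLukeTokenClassification(tokenizer=tokenizer)
#     batch = collator(features)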
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__(self):
        self.graph = {}
    # adding vertices and edges; the weight is optional and repetition is handled
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []
    def all_nodes(self):
        return list(self.graph)
    # handles the case where the input edge does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited
    # c is the node count; if omitted (or -1) it is chosen at random in [10, 10009]
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)
    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited
    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count
    def out_degree(self, u):
        return len(self.graph[u])
    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: Optional[Any] = []
a__: Dict = []
a__: Optional[Any] = list(self.graph)[0]
stack.append(lowercase)
visited.append(lowercase)
a__: Tuple = -2
a__: str = []
a__: str = s
a__: List[str] = False
a__: List[str] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s]) != 0:
a__: Tuple = s
for node in self.graph[s]:
if (
visited.count(node[1]) > 0
and node[1] != parent
and indirect_parents.count(node[1]) > 0
and not on_the_way_back
):
a__: str = len(lowercase) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1])
break
else:
anticipating_nodes.add(stack[len_stack])
len_stack -= 1
if visited.count(node[1]) < 1:
stack.append(node[1])
visited.append(node[1])
a__: List[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
a__: str = True
if len(lowercase) != 0:
a__: Optional[int] = stack[len(lowercase) - 1]
else:
a__: Tuple = False
indirect_parents.append(lowercase)
a__: Any = s
a__: str = ss
# check if se have reached the starting point
if len(lowercase) == 0:
return list(lowercase)
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: List[Any] = []
a__: Tuple = []
a__: List[Any] = list(self.graph)[0]
stack.append(lowercase)
visited.append(lowercase)
a__: List[Any] = -2
a__: Any = []
a__: int = s
a__: Optional[int] = False
a__: Dict = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s]) != 0:
a__: Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1]) > 0
and node[1] != parent
and indirect_parents.count(node[1]) > 0
and not on_the_way_back
):
a__: int = len(lowercase) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1])
break
else:
return True
if visited.count(node[1]) < 1:
stack.append(node[1])
visited.append(node[1])
a__: str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
a__: List[str] = True
if len(lowercase) != 0:
a__: Union[str, Any] = stack[len(lowercase) - 1]
else:
a__: Union[str, Any] = False
indirect_parents.append(lowercase)
a__: List[str] = s
a__: str = ss
# check if se have reached the starting point
if len(lowercase) == 0:
return False
    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
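# Small smoke test for the directed graph above (illustrative; relies only on
# the methods reconstructed in this file):
#
#     dg = DirectedGraph()
#     dg.add_pair(0, 1)
#     dg.add_pair(1, 2)
#     dg.add_pair(0, 2)
#     print(dg.all_nodes())   # [0, 1, 2]
#     print(dg.dfs(0, 2))     # e.g. [0, 1, 2]
#     print(dg.in_degree(2))  # 2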
class Graph:
    def __init__(self):
        self.graph = {}
    # adding vertices and edges; the weight is optional and repetition is handled
    def add_pair(self, u, v, w=1):
        # check if u exists
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is a edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]
    # handles the case where the input edge does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)
        # the other way round
        if self.graph.get(v):
            for edge in self.graph[v]:
                if edge[1] == u:
                    self.graph[v].remove(edge)
def lowerCamelCase_ ( self , lowercase=-2 , lowercase=-1) -> List[str]:
'''simple docstring'''
if s == d:
return []
a__: Any = []
a__: Optional[Any] = []
if s == -2:
a__: Tuple = list(self.graph)[0]
stack.append(lowercase)
visited.append(lowercase)
a__: Dict = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s]) != 0:
a__: List[str] = s
for node in self.graph[s]:
if visited.count(node[1]) < 1:
if node[1] == d:
visited.append(lowercase)
return visited
else:
stack.append(node[1])
visited.append(node[1])
a__: str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowercase) != 0:
a__: Tuple = stack[len(lowercase) - 1]
else:
a__: Optional[int] = ss
# check if se have reached the starting point
if len(lowercase) == 0:
return visited
def lowerCamelCase_ ( self , lowercase=-1) -> Tuple:
'''simple docstring'''
if c == -1:
a__: str = floor(random() * 1_00_00) + 10
for i in range(lowercase):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_02) + 1):
a__: Optional[Any] = floor(random() * c) + 1
if n != i:
self.add_pair(lowercase , lowercase , 1)
def lowerCamelCase_ ( self , lowercase=-2) -> Union[str, Any]:
'''simple docstring'''
a__: List[str] = deque()
a__: List[Any] = []
if s == -2:
a__: List[Any] = list(self.graph)[0]
d.append(lowercase)
visited.append(lowercase)
while d:
a__: Optional[int] = d.popleft()
if len(self.graph[s]) != 0:
for node in self.graph[s]:
if visited.count(node[1]) < 1:
d.append(node[1])
visited.append(node[1])
return visited
    def degree(self, u):
        return len(self.graph[u])
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__: List[Any] = []
a__: Union[str, Any] = []
a__: Dict = list(self.graph)[0]
stack.append(lowercase)
visited.append(lowercase)
a__: Optional[Any] = -2
a__: Tuple = []
a__: Tuple = s
a__: int = False
a__: Dict = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s]) != 0:
a__: Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1]) > 0
and node[1] != parent
and indirect_parents.count(node[1]) > 0
and not on_the_way_back
):
a__: Dict = len(lowercase) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1])
break
else:
anticipating_nodes.add(stack[len_stack])
len_stack -= 1
if visited.count(node[1]) < 1:
stack.append(node[1])
visited.append(node[1])
a__: Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
a__: Optional[Any] = True
if len(lowercase) != 0:
a__: List[str] = stack[len(lowercase) - 1]
else:
a__: Tuple = False
indirect_parents.append(lowercase)
a__: List[str] = s
a__: Dict = ss
# check if se have reached the starting point
if len(lowercase) == 0:
return list(lowercase)
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Optional[int] = []
a__: List[Any] = []
a__: Union[str, Any] = list(self.graph)[0]
stack.append(lowercase)
visited.append(lowercase)
a__: str = -2
a__: List[str] = []
a__: Optional[int] = s
a__: Tuple = False
a__: List[Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s]) != 0:
a__: Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1]) > 0
and node[1] != parent
and indirect_parents.count(node[1]) > 0
and not on_the_way_back
):
a__: Optional[int] = len(lowercase) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1])
break
else:
return True
if visited.count(node[1]) < 1:
stack.append(node[1])
visited.append(node[1])
a__: Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
a__: int = True
if len(lowercase) != 0:
a__: Any = stack[len(lowercase) - 1]
else:
a__: int = False
indirect_parents.append(lowercase)
a__: Optional[Any] = s
a__: Dict = ss
# check if se have reached the starting point
if len(lowercase) == 0:
return False
    def all_nodes(self):
        return list(self.graph)
    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
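# And the undirected counterpart (illustrative; edges are mirrored on insertion,
# so the degree counts both directions):
#
#     g = Graph()
#     g.add_pair(0, 1)
#     g.add_pair(1, 2)
#     print(g.all_nodes())  # [0, 1, 2]
#     print(g.degree(1))    # 2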
"""simple docstring"""
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=64 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=5_12 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=3 , lowercase=4 , lowercase=None , ) -> Optional[Any]:
'''simple docstring'''
a__: int = parent
a__: Union[str, Any] = batch_size
a__: Optional[int] = seq_length
a__: int = is_training
a__: Optional[Any] = use_input_mask
a__: List[Any] = use_token_type_ids
a__: List[str] = use_labels
a__: Dict = vocab_size
a__: Tuple = hidden_size
a__: Optional[Any] = embedding_size
a__: Optional[int] = num_hidden_layers
a__: Optional[int] = num_attention_heads
a__: Optional[int] = intermediate_size
a__: Dict = hidden_act
a__: List[str] = hidden_dropout_prob
a__: str = attention_probs_dropout_prob
a__: List[str] = max_position_embeddings
a__: str = type_vocab_size
a__: Tuple = type_sequence_label_size
a__: List[Any] = initializer_range
a__: Optional[Any] = num_labels
a__: Optional[int] = num_choices
a__: int = scope
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a__: Union[str, Any] = None
if self.use_input_mask:
a__: Optional[Any] = random_attention_mask([self.batch_size, self.seq_length])
a__: Optional[Any] = None
if self.use_token_type_ids:
a__: Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
a__: List[Any] = None
a__: Optional[int] = None
a__: Optional[Any] = None
if self.use_labels:
a__: List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a__: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a__: Optional[Any] = ids_tensor([self.batch_size] , self.num_choices)
a__: Dict = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Optional[Any]:
'''simple docstring'''
a__: List[str] = MegatronBertModel(config=lowercase)
model.to(lowercase)
model.eval()
a__: List[Any] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase)
a__: Any = model(lowercase , token_type_ids=lowercase)
a__: List[Any] = model(lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> List[str]:
'''simple docstring'''
a__: List[str] = MegatronBertForMaskedLM(config=lowercase)
model.to(lowercase)
model.eval()
a__: Optional[Any] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> int:
'''simple docstring'''
a__: Dict = MegatronBertForCausalLM(config=lowercase)
model.to(lowercase)
model.eval()
a__: List[str] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Dict:
'''simple docstring'''
a__: Union[str, Any] = MegatronBertForNextSentencePrediction(config=lowercase)
model.to(lowercase)
model.eval()
a__: str = model(
lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Optional[int]:
'''simple docstring'''
a__: int = MegatronBertForPreTraining(config=lowercase)
model.to(lowercase)
model.eval()
a__: Dict = model(
lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , next_sentence_label=lowercase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> int:
'''simple docstring'''
a__: Dict = MegatronBertForQuestionAnswering(config=lowercase)
model.to(lowercase)
model.eval()
a__: Any = model(
lowercase , attention_mask=lowercase , token_type_ids=lowercase , start_positions=lowercase , end_positions=lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Optional[Any]:
'''simple docstring'''
a__: Tuple = self.num_labels
a__: Union[str, Any] = MegatronBertForSequenceClassification(lowercase)
model.to(lowercase)
model.eval()
a__: int = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Dict:
'''simple docstring'''
a__: int = self.num_labels
a__: Optional[Any] = MegatronBertForTokenClassification(config=lowercase)
model.to(lowercase)
model.eval()
a__: Tuple = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> str:
'''simple docstring'''
a__: Dict = self.num_choices
a__: Any = MegatronBertForMultipleChoice(config=lowercase)
model.to(lowercase)
model.eval()
a__: List[str] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a__: Dict = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a__: List[str] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a__: List[Any] = model(
lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": MegatronBertModel,
"""fill-mask""": MegatronBertForMaskedLM,
"""question-answering""": MegatronBertForQuestionAnswering,
"""text-classification""": MegatronBertForSequenceClassification,
"""text-generation""": MegatronBertForCausalLM,
"""token-classification""": MegatronBertForTokenClassification,
"""zero-shot""": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ = True
# test_resize_embeddings = False
a__ = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*lowercase)
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*lowercase)
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*lowercase)
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*lowercase)
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*lowercase)
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*lowercase)
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*lowercase)
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*lowercase)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
@slow
@unittest.skip('Model is not available.')
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
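# Illustrative check of the tolerance comparison used above (pure stdlib):
#
#     import math
#     math.isclose(0.60401, 0.6040, rel_tol=1e-4, abs_tol=1e-4)  # True
#     math.isclose(0.61000, 0.6040, rel_tol=1e-4, abs_tol=1e-4)  # False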
'''simple docstring'''
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
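# The guard pattern above generalizes: a hypothetical new scheduler with an
# extra backend dependency would follow the same shape (sketch only; the
# `some_backend` names below are placeholders, not real modules):
#
#     try:
#         if not (is_torch_available() and is_some_backend_available()):
#             raise OptionalDependencyNotAvailable()
#     except OptionalDependencyNotAvailable:
#         from ..utils.dummy_torch_and_some_backend_objects import *  # noqa F403
#     else:
#         from .scheduling_new_method import NewMethodScheduler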
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
def _lowerCamelCase ( self : List[str] , __A : str , __A : int , __A : List[Any] ):
__UpperCamelCase = len(__A ) if isinstance(__A , __A ) else 1
# get prompt text embeddings
__UpperCamelCase = self.tokenizer(
__A , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
__UpperCamelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__UpperCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
__UpperCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
__UpperCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
__UpperCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=__A )
# duplicate text embeddings for each generation per prompt
__UpperCamelCase = prompt_embeds.repeat_interleave(__A , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
__UpperCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings
__UpperCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(__A , 1 , 1 )
else:
__UpperCamelCase = [''] * batch_size
__UpperCamelCase = text_input_ids.shape[-1]
__UpperCamelCase = self.tokenizer(
__A , padding='max_length' , max_length=__A , truncation=__A , return_tensors='pt' , )
__UpperCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
__UpperCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=__A )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__UpperCamelCase = negative_prompt_embeds.shape[1]
__UpperCamelCase = negative_prompt_embeds.repeat(1 , __A , 1 )
__UpperCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __A , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__UpperCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self : List[str] , __A : Union[str, List[str]] , __A : int = 1_0_0 , __A : float = 5.0 , __A : float = 1.0 , __A : int = 1 , __A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __A : Optional[torch.FloatTensor] = None , __A : Optional[str] = "pil" , __A : bool = True , __A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __A : int = 1 , ):
if isinstance(__A , __A ):
__UpperCamelCase = 1
elif isinstance(__A , __A ):
__UpperCamelCase = len(__A )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__A )}''' )
__UpperCamelCase = batch_size * num_images_per_prompt
__UpperCamelCase = guidance_scale > 1.0
__UpperCamelCase = self._encode_prompt(__A , __A , __A )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__A , __A ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(__A )}.''' )
# get the initial completely masked latents unless the user supplied it
__UpperCamelCase = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
__UpperCamelCase = self.transformer.num_vector_embeds - 1
__UpperCamelCase = torch.full(__A , __A ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
'Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,'
f''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
__UpperCamelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__A , device=self.device )
__UpperCamelCase = self.scheduler.timesteps.to(self.device )
__UpperCamelCase = latents
for i, t in enumerate(self.progress_bar(__A ) ):
# expand the sample if we are doing classifier free guidance
__UpperCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
__UpperCamelCase = self.transformer(__A , encoder_hidden_states=__A , timestep=__A ).sample
if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)
__UpperCamelCase = self.truncate(__A , __A )
# remove `log(0)`'s (`-inf`s)
__UpperCamelCase = model_output.clamp(-7_0 )
# compute the previous noisy sample x_t -> x_t-1
__UpperCamelCase = self.scheduler.step(__A , timestep=__A , sample=__A , generator=__A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__A , __A , __A )
__UpperCamelCase = self.vqvae.config.vq_embed_dim
__UpperCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
__UpperCamelCase = self.vqvae.quantize.get_codebook_entry(__A , shape=__A )
__UpperCamelCase = self.vqvae.decode(__A , force_not_quantize=__A ).sample
__UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
__UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__UpperCamelCase = self.numpy_to_pil(__A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__A )
def _lowerCamelCase ( self : str , __A : torch.FloatTensor , __A : float ):
__UpperCamelCase , __UpperCamelCase = torch.sort(__A , 1 , descending=__A )
__UpperCamelCase = torch.exp(__A )
__UpperCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
__UpperCamelCase = torch.full_like(keep_mask[:, 0:1, :] , __A )
__UpperCamelCase = torch.cat((all_true, keep_mask) , dim=1 )
__UpperCamelCase = keep_mask[:, :-1, :]
__UpperCamelCase = keep_mask.gather(1 , indices.argsort(1 ) )
__UpperCamelCase = log_p_x_0.clone()
__UpperCamelCase = -torch.inf # -inf = log(0)
return rv
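# --- Illustrative sketch (added for clarity; not part of the pipeline above) ---
# `truncate` keeps, per latent position, only the highest log-probabilities
# whose cumulative probability stays below `truncation_rate`, and zeroes the
# rest in log space (-inf) before sampling. A minimal standalone demo of the
# same masking trick; the function name and tensor shapes are assumptions.
import torch

def truncate_log_probs(log_p_x_0: torch.Tensor, truncation_rate: float) -> torch.Tensor:
    # log_p_x_0: (batch, num_classes, num_positions) log-probabilities
    sorted_log_p, indices = torch.sort(log_p_x_0, 1, descending=True)
    sorted_p = torch.exp(sorted_log_p)
    keep_mask = sorted_p.cumsum(dim=1) < truncation_rate
    # Shift the mask by one so the single most likely class is always kept.
    all_true = torch.full_like(keep_mask[:, 0:1, :], True)
    keep_mask = torch.cat((all_true, keep_mask), dim=1)[:, :-1, :]
    # Undo the sort so the mask lines up with the original class order.
    keep_mask = keep_mask.gather(1, indices.argsort(1))
    out = log_p_x_0.clone()
    out[~keep_mask] = -torch.inf  # log(0)
    return out

# Usage: truncate_log_probs(torch.log_softmax(torch.randn(1, 5, 3), dim=1), 0.9)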
| 399 | 1 |
'''simple docstring'''
snake_case__ = 'Alexander Joslin'
import operator as op
from .stack import Stack
def __magic_name__( __UpperCAmelCase ) -> int:
'''simple docstring'''
_lowerCamelCase = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
_lowerCamelCase = Stack()
_lowerCamelCase = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(__UpperCAmelCase ) )
elif i in operators:
# RULE 2
operator_stack.push(__UpperCAmelCase )
elif i == ")":
# RULE 4
_lowerCamelCase = operator_stack.peek()
operator_stack.pop()
_lowerCamelCase = operand_stack.peek()
operand_stack.pop()
_lowerCamelCase = operand_stack.peek()
operand_stack.pop()
_lowerCamelCase = operators[opr](__UpperCAmelCase , __UpperCAmelCase )
operand_stack.push(__UpperCAmelCase )
# RULE 5
return operand_stack.peek()
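# Worked trace for "(5 + ((4 * 2) * (2 + 3)))": digits go to the operand stack
# and operators to the operator stack; each ")" pops one operator and two
# operands: (4 * 2) -> 8, (2 + 3) -> 5, (8 * 5) -> 40, (5 + 40) -> 45.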
if __name__ == "__main__":
snake_case__ = '(5 + ((4 * 2) * (2 + 3)))'
# answer = 45
print(f'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
| 715 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='%(message)s')
def __magic_name__( __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
_lowerCamelCase = np.nan
for i in range(__UpperCAmelCase ):
_lowerCamelCase = features[:, labels == i]
_lowerCamelCase = data.mean(1 )
# Centralize the data of class i
_lowerCamelCase = data - column_reshape(__UpperCAmelCase )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(__UpperCAmelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCamelCase = np.dot(__UpperCAmelCase , centered_data.T )
return covariance_sum / features.shape[1]
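# Intended quantity (modulo the renamed locals above): the pooled within-class
# scatter S_W = (1 / N) * sum_i Xc_i @ Xc_i.T, where Xc_i is the column-wise
# centered data of class i and N is the total number of samples.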
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
_lowerCamelCase = features.mean(1 )
_lowerCamelCase = np.nan
for i in range(__UpperCAmelCase ):
_lowerCamelCase = features[:, labels == i]
_lowerCamelCase = data.shape[1]
_lowerCamelCase = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase ) , (column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCamelCase = device_data * np.dot(
column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase ) , (column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase )).T , )
return covariance_sum / features.shape[1]
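# Intended quantity (modulo the renamed locals above): the between-class
# scatter S_B = (1 / N) * sum_i n_i * (mu_i - mu) @ (mu_i - mu).T, with n_i
# the size of class i, mu_i its mean and mu the global feature mean.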
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
if features.any():
_lowerCamelCase = features.mean(1 )
# Center the dataset
_lowerCamelCase = features - np.reshape(__UpperCAmelCase , (data_mean.size, 1) )
_lowerCamelCase = np.dot(__UpperCAmelCase , centered_data.T ) / features.shape[1]
_lowerCamelCase , _lowerCamelCase = np.linalg.eigh(__UpperCAmelCase )
# Take all the columns in the reverse order (-1), and then takes only the first
_lowerCamelCase = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
_lowerCamelCase = np.dot(filtered_eigenvectors.T , __UpperCAmelCase )
logging.info('''Principal Component Analysis computed''' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=__UpperCAmelCase )
logging.error('''Dataset empty''' )
raise AssertionError
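# --- Illustrative sketch (standalone; uses only the numpy import above) ---
# The routine above centers the data, eigendecomposes the covariance matrix
# and projects onto the leading eigenvectors. The same steps in minimal form
# (the name `pca_sketch` and the centered projection are assumptions):
def pca_sketch(features: np.ndarray, dimensions: int) -> np.ndarray:
    # features: (n_features, n_samples); returns (dimensions, n_samples)
    centered = features - features.mean(axis=1, keepdims=True)
    covariance = centered @ centered.T / features.shape[1]
    _, eigenvectors = np.linalg.eigh(covariance)  # ascending eigenvalues
    top = eigenvectors[:, ::-1][:, :dimensions]  # largest eigenvalues first
    return top.T @ centered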
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
if features.any():
_lowerCamelCase , _lowerCamelCase = eigh(
covariance_between_classes(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , covariance_within_classes(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , )
_lowerCamelCase = eigenvectors[:, ::-1][:, :dimensions]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = np.linalg.svd(__UpperCAmelCase )
_lowerCamelCase = svd_matrix[:, 0:dimensions]
_lowerCamelCase = np.dot(filtered_svd_matrix.T , __UpperCAmelCase )
logging.info('''Linear Discriminant Analysis computed''' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=__UpperCAmelCase )
logging.error('''Dataset empty''' )
raise AssertionError
def __magic_name__( ) -> None:
'''simple docstring'''
_lowerCamelCase = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
_lowerCamelCase = np.array([0, 0, 0, 1, 1] )
_lowerCamelCase = 2
_lowerCamelCase = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(__UpperCAmelCase ) as error_info:
_lowerCamelCase = linear_discriminant_analysis(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if isinstance(__UpperCAmelCase , np.ndarray ):
raise AssertionError(
'''Did not raise AssertionError for dimensions > classes''' )
assert error_info.type is AssertionError
def __magic_name__( ) -> None:
'''simple docstring'''
_lowerCamelCase = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
_lowerCamelCase = 2
_lowerCamelCase = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] )
with pytest.raises(__UpperCAmelCase ) as error_info:
_lowerCamelCase = principal_component_analysis(__UpperCAmelCase , __UpperCAmelCase )
if not np.allclose(__UpperCAmelCase , __UpperCAmelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 638 | 0 |
lowercase_ = '''
# How to install Transformers
! pip install transformers datasets
# To install from source instead of the latest release, comment out the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowercase_ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowercase_ = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 74 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class __lowerCAmelCase ( __a ):
snake_case : Union[List[PIL.Image.Image], np.ndarray]
snake_case : Optional[List[bool]]
snake_case : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 414 | 0 |
def __lowerCAmelCase ( ):
UpperCAmelCase_ = 0
for i in range(1 , 1001 ):
UpperCAmelCase_ += i**i
return str(UpperCAmelCase_ )[-10:]
if __name__ == "__main__":
print(solution())
| 268 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_a: List[str] = {
"""configuration_encodec""": [
"""ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EncodecConfig""",
],
"""feature_extraction_encodec""": ["""EncodecFeatureExtractor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a: List[Any] = [
"""ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EncodecModel""",
"""EncodecPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
_a: int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 268 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A__(unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=7 , _lowercase=3 , _lowercase=18 , _lowercase=30 , _lowercase=400 , _lowercase=True , _lowercase=None , _lowercase=True , ) -> Optional[int]:
a_ : Optional[Any] = size if size is not None else {"""height""": 18, """width""": 18}
a_ : Optional[int] = parent
a_ : Dict = batch_size
a_ : Union[str, Any] = num_channels
a_ : Optional[int] = image_size
a_ : str = min_resolution
a_ : Optional[Any] = max_resolution
a_ : List[Any] = do_resize
a_ : str = size
a_ : List[Any] = apply_ocr
def UpperCamelCase__ ( self ) -> Union[str, Any]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class A__(a_, unittest.TestCase ):
"""simple docstring"""
_A : str = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def UpperCamelCase__ ( self ) -> Any:
a_ : Union[str, Any] = LayoutLMvaImageProcessingTester(self )
@property
def UpperCamelCase__ ( self ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ) -> int:
a_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , """do_resize""" ) )
self.assertTrue(hasattr(_lowercase , """size""" ) )
self.assertTrue(hasattr(_lowercase , """apply_ocr""" ) )
def UpperCamelCase__ ( self ) -> str:
a_ : str = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
a_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def UpperCamelCase__ ( self ) -> str:
pass
def UpperCamelCase__ ( self ) -> str:
# Initialize image_processing
a_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
a_ : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , _lowercase )
self.assertIsInstance(encoding.boxes , _lowercase )
# Test batched
a_ : Union[str, Any] = image_processing(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def UpperCamelCase__ ( self ) -> Any:
# Initialize image_processing
a_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
a_ : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
a_ : Any = image_processing(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def UpperCamelCase__ ( self ) -> List[str]:
# Initialize image_processing
a_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
a_ : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
a_ : Dict = image_processing(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def UpperCamelCase__ ( self ) -> List[str]:
# with apply_OCR = True
a_ : Optional[Any] = LayoutLMvaImageProcessor()
from datasets import load_dataset
a_ : Union[str, Any] = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
a_ : Any = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
a_ : Optional[int] = image_processing(_lowercase , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
a_ : Any = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
a_ : int = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 
788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _lowercase )
self.assertListEqual(encoding.boxes , _lowercase )
# with apply_OCR = False
a_ : Tuple = LayoutLMvaImageProcessor(apply_ocr=_lowercase )
a_ : int = image_processing(_lowercase , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 540 |
__snake_case : int = [
"""Audio""",
"""Array2D""",
"""Array3D""",
"""Array4D""",
"""Array5D""",
"""ClassLabel""",
"""Features""",
"""Sequence""",
"""Value""",
"""Image""",
"""Translation""",
"""TranslationVariableLanguages""",
]
from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 540 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowercase : Union[str, Any] = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Union[str, Any] = ["DeiTFeatureExtractor"]
_lowercase : Dict = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[int] = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[Any] = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
_lowercase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 702 |
from __future__ import annotations
import requests
def _lowerCAmelCase ( UpperCamelCase__: str ) -> dict:
"""simple docstring"""
A = f'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'
return requests.get(UpperCamelCase__ ).json()
def _lowerCAmelCase ( UpperCamelCase__: int = 10 ) -> list[dict]:
"""simple docstring"""
A = """https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"""
A = requests.get(UpperCamelCase__ ).json()[:max_stories]
return [get_hackernews_story(UpperCamelCase__ ) for story_id in story_ids]
def _lowerCAmelCase ( UpperCamelCase__: int = 10 ) -> str:
"""simple docstring"""
A = hackernews_top_stories(UpperCamelCase__ )
return "\n".join("""* [{title}]({url})""".format(**UpperCamelCase__ ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 546 | 0 |
import os
from pathlib import Path
def SCREAMING_SNAKE_CASE_ ( ) -> Dict:
from torch.utils.cpp_extension import load
_A = Path(_snake_case ).resolve().parent.parent.parent / '''kernels''' / '''deformable_detr'''
_A = [
root / filename
for filename in [
'''vision.cpp''',
os.path.join('''cpu''' , '''ms_deform_attn_cpu.cpp''' ),
os.path.join('''cuda''' , '''ms_deform_attn_cuda.cu''' ),
]
]
load(
'''MultiScaleDeformableAttention''' , _snake_case , with_cuda=_snake_case , extra_include_paths=[str(_snake_case )] , extra_cflags=['''-DWITH_CUDA=1'''] , extra_cuda_cflags=[
'''-DCUDA_HAS_FP16=1''',
'''-D__CUDA_NO_HALF_OPERATORS__''',
'''-D__CUDA_NO_HALF_CONVERSIONS__''',
'''-D__CUDA_NO_HALF2_OPERATORS__''',
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
| 2 |
'''simple docstring'''
def a__ ( lowercase : str ) -> int:
"""simple docstring"""
assert column_title.isupper()
_UpperCamelCase = 0
_UpperCamelCase = len(lowercase ) - 1
_UpperCamelCase = 0
while index >= 0:
_UpperCamelCase = (ord(column_title[index] ) - 64) * pow(26, lowercase )
answer += value
power += 1
index -= 1
return answer
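# The column title is read as a base-26 number with digits A..Z = 1..26,
# e.g. "A" -> 1, "Z" -> 26, "AB" -> 1 * 26 + 2 = 28.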
if __name__ == "__main__":
from doctest import testmod
testmod()
| 98 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_A : Optional[int] = logging.get_logger(__name__)
_A : Any = {
'''post_extract_proj''': '''feature_projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.upsample.0''': '''encoder.upsample.projection''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def UpperCamelCase_ ( snake_case_ : int , snake_case_ : str , snake_case_ : str , snake_case_ : Dict , snake_case_ : Dict ) -> Dict:
'''simple docstring'''
for attribute in key.split(""".""" ):
__lowerCAmelCase = getattr(snake_case_ , snake_case_ )
if weight_type is not None:
__lowerCAmelCase = getattr(snake_case_ , snake_case_ ).shape
else:
__lowerCAmelCase = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
__lowerCAmelCase = value
elif weight_type == "weight_g":
__lowerCAmelCase = value
elif weight_type == "weight_v":
__lowerCAmelCase = value
elif weight_type == "bias":
__lowerCAmelCase = value
else:
__lowerCAmelCase = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def UpperCamelCase_ ( snake_case_ : int , snake_case_ : List[str] , snake_case_ : List[str] ) -> Dict:
'''simple docstring'''
__lowerCAmelCase = []
__lowerCAmelCase = fairseq_model.state_dict()
__lowerCAmelCase = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
__lowerCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , hf_model.config.feat_extract_norm == """group""" , )
__lowerCAmelCase = True
else:
for key, mapped_key in MAPPING.items():
__lowerCAmelCase = """sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__lowerCAmelCase = True
if "*" in mapped_key:
__lowerCAmelCase = name.split(snake_case_ )[0].split(""".""" )[-2]
__lowerCAmelCase = mapped_key.replace("""*""" , snake_case_ )
if "weight_g" in name:
__lowerCAmelCase = """weight_g"""
elif "weight_v" in name:
__lowerCAmelCase = """weight_v"""
elif "weight" in name:
__lowerCAmelCase = """weight"""
elif "bias" in name:
__lowerCAmelCase = """bias"""
else:
__lowerCAmelCase = None
set_recursively(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
continue
if not is_used:
unused_weights.append(snake_case_ )
logger.warning(f"""Unused weights: {unused_weights}""" )
def UpperCamelCase_ ( snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] , snake_case_ : List[Any] ) -> List[Any]:
'''simple docstring'''
__lowerCAmelCase = full_name.split("""conv_layers.""" )[-1]
__lowerCAmelCase = name.split(""".""" )
__lowerCAmelCase = int(items[0] )
__lowerCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
__lowerCAmelCase = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
__lowerCAmelCase = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
__lowerCAmelCase = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
__lowerCAmelCase = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(snake_case_ )
def UpperCamelCase_ ( snake_case_ : int , snake_case_ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
__lowerCAmelCase = SEWConfig()
if is_finetuned:
__lowerCAmelCase = model.wav_encoder.wav_model.cfg
else:
__lowerCAmelCase = model.cfg
__lowerCAmelCase = fs_config.conv_bias
__lowerCAmelCase = eval(fs_config.conv_feature_layers )
__lowerCAmelCase = [x[0] for x in conv_layers]
__lowerCAmelCase = [x[1] for x in conv_layers]
__lowerCAmelCase = [x[2] for x in conv_layers]
__lowerCAmelCase = """gelu"""
__lowerCAmelCase = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group"""
__lowerCAmelCase = 0.0
__lowerCAmelCase = fs_config.activation_fn.name
__lowerCAmelCase = fs_config.encoder_embed_dim
__lowerCAmelCase = 0.0_2
__lowerCAmelCase = fs_config.encoder_ffn_embed_dim
__lowerCAmelCase = 1E-5
__lowerCAmelCase = fs_config.encoder_layerdrop
__lowerCAmelCase = fs_config.encoder_attention_heads
__lowerCAmelCase = fs_config.conv_pos_groups
__lowerCAmelCase = fs_config.conv_pos
__lowerCAmelCase = len(snake_case_ )
__lowerCAmelCase = fs_config.encoder_layers
__lowerCAmelCase = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
__lowerCAmelCase = model.cfg
__lowerCAmelCase = fs_config.final_dropout
__lowerCAmelCase = fs_config.layerdrop
__lowerCAmelCase = fs_config.activation_dropout
__lowerCAmelCase = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
__lowerCAmelCase = fs_config.attention_dropout
__lowerCAmelCase = fs_config.dropout_input
__lowerCAmelCase = fs_config.dropout
__lowerCAmelCase = fs_config.mask_channel_length
__lowerCAmelCase = fs_config.mask_channel_prob
__lowerCAmelCase = fs_config.mask_length
__lowerCAmelCase = fs_config.mask_prob
__lowerCAmelCase = """Wav2Vec2FeatureExtractor"""
__lowerCAmelCase = """Wav2Vec2CTCTokenizer"""
return config
@torch.no_grad()
def UpperCamelCase_ ( snake_case_ : str , snake_case_ : Dict , snake_case_ : Dict=None , snake_case_ : Optional[int]=None , snake_case_ : Union[str, Any]=True ) -> List[Any]:
'''simple docstring'''
if is_finetuned:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
__lowerCAmelCase = SEWConfig.from_pretrained(snake_case_ )
else:
__lowerCAmelCase = convert_config(model[0] , snake_case_ )
__lowerCAmelCase = model[0].eval()
__lowerCAmelCase = True if config.feat_extract_norm == """layer""" else False
__lowerCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=snake_case_ , return_attention_mask=snake_case_ , )
if is_finetuned:
if dict_path:
__lowerCAmelCase = Dictionary.load(snake_case_ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__lowerCAmelCase = target_dict.pad_index
__lowerCAmelCase = target_dict.bos_index
__lowerCAmelCase = target_dict.pad_index
__lowerCAmelCase = target_dict.bos_index
__lowerCAmelCase = target_dict.eos_index
__lowerCAmelCase = len(target_dict.symbols )
__lowerCAmelCase = os.path.join(snake_case_ , """vocab.json""" )
if not os.path.isdir(snake_case_ ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(snake_case_ ) )
return
os.makedirs(snake_case_ , exist_ok=snake_case_ )
with open(snake_case_ , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices , snake_case_ )
__lowerCAmelCase = WavaVecaCTCTokenizer(
snake_case_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=snake_case_ , )
__lowerCAmelCase = WavaVecaProcessor(feature_extractor=snake_case_ , tokenizer=snake_case_ )
processor.save_pretrained(snake_case_ )
__lowerCAmelCase = SEWForCTC(snake_case_ )
else:
__lowerCAmelCase = SEWModel(snake_case_ )
feature_extractor.save_pretrained(snake_case_ )
recursively_load_weights(snake_case_ , snake_case_ , snake_case_ )
hf_model.save_pretrained(snake_case_ )
if __name__ == "__main__":
_A : Tuple = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--is_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
_A : int = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 330 | '''simple docstring'''
def UpperCamelCase_ ( snake_case_ : list , snake_case_ : int , snake_case_ : int = 0 , snake_case_ : int = 0 ) -> int:
'''simple docstring'''
__lowerCAmelCase = right or len(snake_case_ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(snake_case_ , snake_case_ , left + 1 , right - 1 )
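# Note: this narrows the window from both ends one index per call, so it is a
# two-pointer scan with O(n) worst case rather than a logarithmic binary search.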
if __name__ == "__main__":
import doctest
doctest.testmod()
| 330 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_UpperCAmelCase = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 699 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = '''facebook/bart-large-mnli'''
SCREAMING_SNAKE_CASE : Union[str, Any] = (
'''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
'''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
'''It returns the most likely label in the list of provided `labels` for the input text.'''
)
SCREAMING_SNAKE_CASE : Any = '''text_classifier'''
SCREAMING_SNAKE_CASE : Any = AutoTokenizer
SCREAMING_SNAKE_CASE : Dict = AutoModelForSequenceClassification
SCREAMING_SNAKE_CASE : List[Any] = ['''text''', ['''text''']]
SCREAMING_SNAKE_CASE : Dict = ['''text''']
def UpperCamelCase ( self : List[str] ):
super().setup()
A = self.model.config
A = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('entail' ):
A = int(UpperCamelCase__ )
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
def UpperCamelCase ( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict ):
A = labels
return self.pre_processor(
[text] * len(UpperCamelCase__ ) , [f'''This example is {label}''' for label in labels] , return_tensors='pt' , padding='max_length' , )
def UpperCamelCase ( self : int , UpperCamelCase__ : List[str] ):
A = outputs.logits
A = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
| 699 | 1 |
def SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : int ):
return 1 if input_a == input_a else 0
def SCREAMING_SNAKE_CASE ( ):
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 25 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase : List[str] = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
__lowerCamelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 25 | 1 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase : int = logging.get_logger(__name__)
__UpperCamelCase : Optional[int] = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
UpperCamelCase__ = '''segformer'''
def __init__( self :int , __magic_name__ :Optional[int]=3 , __magic_name__ :Optional[Any]=4 , __magic_name__ :Optional[int]=[2, 2, 2, 2] , __magic_name__ :Union[str, Any]=[8, 4, 2, 1] , __magic_name__ :Optional[int]=[32, 64, 160, 256] , __magic_name__ :int=[7, 3, 3, 3] , __magic_name__ :int=[4, 2, 2, 2] , __magic_name__ :Tuple=[1, 2, 5, 8] , __magic_name__ :Any=[4, 4, 4, 4] , __magic_name__ :Any="gelu" , __magic_name__ :Optional[Any]=0.0 , __magic_name__ :Optional[int]=0.0 , __magic_name__ :str=0.1 , __magic_name__ :int=0.02 , __magic_name__ :int=0.1 , __magic_name__ :int=1E-6 , __magic_name__ :Tuple=256 , __magic_name__ :int=255 , **__magic_name__ :Union[str, Any] , ):
'''simple docstring'''
super().__init__(**_lowercase )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , _lowercase , )
a = num_channels
a = num_encoder_blocks
a = depths
a = sr_ratios
a = hidden_sizes
a = patch_sizes
a = strides
a = mlp_ratios
a = num_attention_heads
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = classifier_dropout_prob
a = initializer_range
a = drop_path_rate
a = layer_norm_eps
a = decoder_hidden_size
a = kwargs.get("""reshape_last_stage""" , _lowercase )
a = semantic_loss_ignore_index
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
UpperCamelCase__ = version.parse('''1.11''' )
@property
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
return 1E-4
@property
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
return 12
| 468 |
"""simple docstring"""
import math
import sys
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: int ):
"""simple docstring"""
if number != int(lowerCamelCase_ ):
raise ValueError("the value of input must be a natural number" )
if number < 0:
raise ValueError("the value of input must not be a negative number" )
if number == 0:
return 1
snake_case : List[str] = [-1] * (number + 1)
snake_case : str = 0
for i in range(1 , number + 1 ):
snake_case : Tuple = sys.maxsize
snake_case : Optional[Any] = int(math.sqrt(lowerCamelCase_ ) )
for j in range(1 , root + 1 ):
snake_case : List[str] = 1 + answers[i - (j**2)]
snake_case : List[str] = min(lowerCamelCase_ , lowerCamelCase_ )
snake_case : Dict = answer
return answers[number]
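# Dynamic programming over Lagrange's four-square theorem: answers[i] is
# 1 + min over j of answers[i - j**2], e.g. 12 -> 3 (4 + 4 + 4), 13 -> 2 (4 + 9).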
if __name__ == "__main__":
import doctest
doctest.testmod()
| 449 | 0 |
from collections import defaultdict
from math import gcd
def lowerCamelCase__ ( _lowerCamelCase = 150_0000 ) ->int:
_UpperCAmelCase =defaultdict(_lowerCamelCase )
_UpperCAmelCase =2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , _lowerCamelCase , 2 ):
if gcd(_lowerCamelCase , _lowerCamelCase ) > 1:
continue
_UpperCAmelCase =2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(_lowerCamelCase , limit + 1 , _lowerCamelCase ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
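# Euclid's formula: coprime m > n of opposite parity yield the primitive triple
# a = m**2 - n**2, b = 2*m*n, c = m**2 + n**2 with perimeter 2*m*(m + n); every
# multiple of a primitive perimeter is tallied, and the answer counts perimeters
# reached exactly once (e.g. L = 12 only by (3, 4, 5)).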
if __name__ == "__main__":
print(F"""{solution() = }""")
| 592 |
def lowerCamelCase__ ( _lowerCamelCase = 1000 ) ->int:
_UpperCAmelCase =2**power
_UpperCAmelCase =str(_lowerCamelCase )
_UpperCAmelCase =list(_lowerCamelCase )
_UpperCAmelCase =0
for i in list_num:
sum_of_num += int(_lowerCamelCase )
return sum_of_num
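# e.g. power = 15: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.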
if __name__ == "__main__":
snake_case__ : List[str] = int(input('Enter the power of 2: ').strip())
print('2 ^ ', power, ' = ', 2**power)
snake_case__ : Union[str, Any] = solution(power)
print('Sum of the digits is: ', result)
| 592 | 1 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ ):
lowerCAmelCase__ = 1
@register_to_config
def __init__( self , lowercase = 1000 , lowercase = None ) -> Optional[Any]:
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(lowercase )
# standard deviation of the initial noise distribution
lowerCamelCase_ = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
lowerCamelCase_ = 4
# running values
lowerCamelCase_ = []
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> int:
lowerCamelCase_ = num_inference_steps
lowerCamelCase_ = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
lowerCamelCase_ = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
lowerCamelCase_ = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
lowerCamelCase_ = torch.sin(steps * math.pi / 2 ) ** 2
lowerCamelCase_ = (1.0 - self.betas**2) ** 0.5
lowerCamelCase_ = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
lowerCamelCase_ = timesteps.to(lowercase )
lowerCamelCase_ = []
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase = True , ) -> Union[SchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )
lowerCamelCase_ = (self.timesteps == timestep).nonzero().item()
lowerCamelCase_ = timestep_index + 1
lowerCamelCase_ = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(lowercase )
if len(self.ets ) == 1:
lowerCamelCase_ = self.ets[-1]
elif len(self.ets ) == 2:
lowerCamelCase_ = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
lowerCamelCase_ = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
lowerCamelCase_ = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
lowerCamelCase_ = self._get_prev_sample(lowercase , lowercase , lowercase , lowercase )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase , *lowercase , **lowercase ) -> torch.FloatTensor:
return sample
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]:
lowerCamelCase_ = self.alphas[timestep_index]
lowerCamelCase_ = self.betas[timestep_index]
lowerCamelCase_ = self.alphas[prev_timestep_index]
lowerCamelCase_ = self.betas[prev_timestep_index]
lowerCamelCase_ = (sample - sigma * ets) / max(lowercase , 1e-8 )
lowerCamelCase_ = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self ) -> Dict:
return self.config.num_train_timesteps
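# --- Illustrative note (added for clarity) ---
# The four branches in `step` above are the 1st- to 4th-order Adams-Bashforth
# multistep coefficients applied to the running history of model outputs `ets`:
#   k=1: e1
#   k=2: (3*e1 - e2) / 2
#   k=3: (23*e1 - 16*e2 + 5*e3) / 12
#   k=4: (55*e1 - 59*e2 + 37*e3 - 9*e4) / 24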
| 463 |
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = StableDiffusionControlNetImgaImgPipeline
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'control_image'} )
lowerCAmelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def SCREAMING_SNAKE_CASE_( self ) -> int:
torch.manual_seed(0 )
lowerCamelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
torch.manual_seed(0 )
lowerCamelCase_ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
lowerCamelCase_ = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=lowercase , set_alpha_to_one=lowercase , )
torch.manual_seed(0 )
lowerCamelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCamelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCamelCase_ = CLIPTextModel(lowercase )
lowerCamelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowerCamelCase_ = {
"unet": unet,
"controlnet": controlnet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase=0 ) -> int:
if str(lowercase ).startswith("mps" ):
lowerCamelCase_ = torch.manual_seed(lowercase )
else:
lowerCamelCase_ = torch.Generator(device=lowercase ).manual_seed(lowercase )
lowerCamelCase_ = 2
lowerCamelCase_ = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowercase , device=torch.device(lowercase ) , )
lowerCamelCase_ = floats_tensor(control_image.shape , rng=random.Random(lowercase ) ).to(lowercase )
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase_ = Image.fromarray(np.uinta(lowercase ) ).convert("RGB" ).resize((64, 64) )
lowerCamelCase_ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"image": image,
"control_image": control_image,
}
return inputs
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        controlnet = MultiControlNetModel([controlnet1, controlnet2])
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        scale = 10.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))
        output = pipe(
            prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6,
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )
        assert np.abs(expected_image - image).max() < 9e-2
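

# Hedged usage sketch mirroring the slow test above (illustrative only; requires a
# CUDA GPU and network access to the same public checkpoints):
#
#     controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#     pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
#         "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
#     )
#     pipe.enable_model_cpu_offload()
#     result = pipe(
#         "evil space-punk bird", image, control_image=canny_image,
#         num_inference_steps=50, strength=0.6, output_type="np",
#     )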
| 463 | 1 |
"""Diffie-Hellman key exchange over the RFC 3526 MODP groups."""
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 2048-bit
14: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 3072-bit
15: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 4096-bit
16: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'''
+ '''FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 6144-bit
17: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'''
+ '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'''
+ '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'''
+ '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'''
+ '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'''
+ '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'''
+ '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'''
+ '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'''
+ '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'''
+ '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'''
+ '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'''
+ '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'''
+ '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'''
+ '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'''
+ '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'''
+ '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'''
+ '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'''
+ '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'''
+ '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'''
+ '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'''
+ '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'''
+ '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'''
+ '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'''
+ '''6DCC4024FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 8192-bit
18: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'''
+ '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'''
+ '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'''
+ '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'''
+ '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'''
+ '''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'''
+ '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'''
+ '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'''
+ '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'''
+ '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'''
+ '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'''
+ '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'''
+ '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'''
+ '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'''
+ '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'''
+ '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'''
+ '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'''
+ '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'''
+ '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'''
+ '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'''
+ '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
}
class DiffieHellman:
    """One party of a Diffie-Hellman key exchange over the MODP group `group`."""

    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]
        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other party's public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key: int, prime: int) -> bool:
        # check if the other party's public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key <= prime - 2
            and pow(remote_public_key, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()

if __name__ == "__main__":
import doctest
doctest.testmod()
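
    # Illustrative end-to-end exchange (added example): both parties derive the
    # same SHA-256 shared secret from each other's public key.
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    assert alice.generate_shared_key(bob.generate_public_key()) == bob.generate_shared_key(
        alice.generate_public_key()
    )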
| 680 |
"""Utilities for saving and loading FSDP model and optimizer state with Accelerate."""
import os

import torch

from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version


if is_torch_version(">=", FSDP_PYTORCH_VERSION):
    import torch.distributed.checkpoint as dist_cp
    from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
    from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
    from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
    from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType


logger = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
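

# Hedged usage sketch (not part of this module): in an `accelerate launch` run with
# FSDP enabled, these helpers are normally driven through `Accelerator.save_state`
# and `Accelerator.load_state`. A direct call looks roughly like:
#
#     from accelerate import Accelerator
#
#     accelerator = Accelerator()
#     model, optimizer = accelerator.prepare(model, optimizer)
#     save_fsdp_model(accelerator.state.fsdp_plugin, accelerator, model, "ckpt")
#     save_fsdp_optimizer(accelerator.state.fsdp_plugin, accelerator, optimizer, model, "ckpt")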
| 680 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["""PoolFormerFeatureExtractor"""]
_lowerCAmelCase = ["""PoolFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 137 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
    args = parser.parse_args()

    main(args)
| 137 | 1 |
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    """Convert a Roman numeral to an integer, e.g. roman_to_int("MMXXIV") == 2024."""
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Convert an integer to a Roman numeral, e.g. int_to_roman(2024) == "MMXXIV"."""
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
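
    # Illustrative round-trip checks (added example).
    assert roman_to_int("MMXXIV") == 2024
    assert int_to_roman(2024) == "MMXXIV"
    assert roman_to_int(int_to_roman(3999)) == 3999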
| 181 |
import qiskit
def quantum_entanglement(qubits: int = 2):
    """Prepare an entangled (Bell/GHZ) state on `qubits` qubits and return the measurement counts."""
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F'''Total count for various states are: {quantum_entanglement(3)}''')
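    # Added note: for the 3-qubit GHZ state prepared above, the 1000 shots split
    # roughly evenly between the two all-equal outcomes,
    # e.g. {"000": ~500, "111": ~500}.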
| 181 | 1 |
"""Multi-ControlNet: a thin wrapper that runs several ControlNets and merges their residuals."""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from torch import nn

from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging


logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    r"""
    Multiple `ControlNetModel` wrapper class for Multi-ControlNet: each ControlNet is run on its own
    conditioning image and the residuals are summed.
    """

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )
            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )
            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)
            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"
        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")
        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )
        return cls(controlnets)
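

# Hedged usage sketch (illustrative; the checkpoint ids are public examples, not
# part of this module):
#
#     nets = [
#         ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny"),
#         ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-depth"),
#     ]
#     multi = MultiControlNetModel(nets)
#     multi.save_pretrained("./multi_controlnet")  # writes ./multi_controlnet, ./multi_controlnet_1
#     restored = MultiControlNetModel.from_pretrained("./multi_controlnet")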
| 215 |
"""Utility that sorts the imports in the custom inits of Diffusers (used by `make style`)."""
import argparse
import os
import re


PATH_TO_DIFFUSERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line: str) -> str:
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code: str, indent_level: str = "", start_prompt: str = None, end_prompt: str = None):
    """Split `code` into its indented blocks, starting at `indent_level` (optionally between `start_prompt` and `end_prompt`)."""
    # Let's split the code into lines and move to start_index.
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """Wraps a `key` (that maps an object to a string) to lower case and remove underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort (`key` optionally maps an object to a string)."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement: str) -> str:
    """Return the same `import_statement` but with objects properly sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
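

# Example of the transformation performed above (illustrative):
#
#     sort_objects_in_import('_import_structure["models"] = ["zebra", "Apple", "CONSTANT"]')
#     # -> '_import_structure["models"] = ["CONSTANT", "Apple", "zebra"]'
#
# Constants sort first, then classes, then functions, each group alphabetically
# while ignoring underscores.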
def sort_imports(file: str, check_only: bool = True):
    """Sort the `_import_structure` imports in `file`; `check_only` determines whether we only check or overwrite."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Sort the imports defined in the `_import_structure` of all inits in the repo."""
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
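
# Typical invocations (illustrative): `python utils/custom_init_isort.py` rewrites
# the inits in place, while `python utils/custom_init_isort.py --check_only` only
# reports (and raises) when a file would change.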
| 215 | 1 |
"""Integration tests that launch the example training scripts on Amazon SageMaker."""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''', '''False''' ) ) is not True, reason='''Skipping test because should only be run when releasing minor transformers version''', )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_ddp.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf_dist.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 711 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Tuple:
'''simple docstring'''
pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> int:
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(UpperCamelCase__ ), BeitForMaskedImageModeling]:
continue
UpperCAmelCase = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.train()
UpperCAmelCase = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
UpperCAmelCase = model(**UpperCamelCase__ ).loss
loss.backward()
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCAmelCase = False
UpperCAmelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(UpperCamelCase__ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCAmelCase = model_class(UpperCamelCase__ )
model.gradient_checkpointing_enable()
model.to(UpperCamelCase__ )
model.train()
UpperCAmelCase = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
UpperCAmelCase = model(**UpperCamelCase__ ).loss
loss.backward()
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = _config_zero_init(UpperCamelCase__ )
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(config=UpperCamelCase__ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def SCREAMING_SNAKE_CASE_ ( self : int ) -> str:
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = BeitModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def lowerCamelCase_() -> str:
UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Any:
'''simple docstring'''
UpperCAmelCase = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(UpperCamelCase__ )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=UpperCamelCase__ , return_tensors="pt" ).pixel_values.to(UpperCamelCase__ )
# prepare bool_masked_pos
UpperCAmelCase = torch.ones((1, 1_96) , dtype=torch.bool ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
UpperCAmelCase = model(pixel_values=UpperCamelCase__ , bool_masked_pos=UpperCamelCase__ )
UpperCAmelCase = outputs.logits
# verify the logits
UpperCAmelCase = torch.Size((1, 1_96, 81_92) )
self.assertEqual(logits.shape , UpperCamelCase__ )
UpperCAmelCase = torch.tensor(
[[-3.24_37, 0.50_72, -13.91_74], [-3.24_56, 0.49_48, -13.94_01], [-3.20_33, 0.51_21, -13.85_50]] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , UpperCamelCase__ , atol=1e-2 ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(UpperCamelCase__ )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=UpperCamelCase__ , return_tensors="pt" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
UpperCAmelCase = model(**UpperCamelCase__ )
UpperCAmelCase = outputs.logits
# verify the logits
UpperCAmelCase = torch.Size((1, 10_00) )
self.assertEqual(logits.shape , UpperCamelCase__ )
UpperCAmelCase = torch.tensor([-1.23_85, -1.09_87, -1.01_08] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
UpperCAmelCase = 2_81
self.assertEqual(logits.argmax(-1 ).item() , UpperCamelCase__ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
UpperCamelCase__ )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=UpperCamelCase__ , return_tensors="pt" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
UpperCAmelCase = model(**UpperCamelCase__ )
UpperCAmelCase = outputs.logits
# verify the logits
UpperCAmelCase = torch.Size((1, 2_18_41) )
self.assertEqual(logits.shape , UpperCamelCase__ )
UpperCAmelCase = torch.tensor([1.68_81, -0.27_87, 0.59_01] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
UpperCAmelCase = 23_96
self.assertEqual(logits.argmax(-1 ).item() , UpperCamelCase__ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
UpperCAmelCase = model.to(UpperCamelCase__ )
UpperCAmelCase = BeitImageProcessor(do_resize=UpperCamelCase__ , size=6_40 , do_center_crop=UpperCamelCase__ )
UpperCAmelCase = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
UpperCAmelCase = Image.open(ds[0]["file"] )
UpperCAmelCase = image_processor(images=UpperCamelCase__ , return_tensors="pt" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
UpperCAmelCase = model(**UpperCamelCase__ )
UpperCAmelCase = outputs.logits
# verify the logits
UpperCAmelCase = torch.Size((1, 1_50, 1_60, 1_60) )
self.assertEqual(logits.shape , UpperCamelCase__ )
UpperCAmelCase = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
if is_pillow_less_than_a:
UpperCAmelCase = torch.tensor(
[
[[-4.92_25, -2.39_54, -3.05_22], [-2.88_22, -1.00_46, -1.75_61], [-2.95_49, -1.32_28, -2.13_47]],
[[-5.81_68, -3.41_29, -4.07_78], [-3.86_51, -2.22_14, -3.02_77], [-3.83_56, -2.46_43, -3.35_35]],
[[-0.00_78, 3.99_52, 4.07_54], [2.98_56, 4.69_44, 5.00_35], [3.24_13, 4.78_13, 4.99_69]],
] , device=UpperCamelCase__ , )
else:
UpperCAmelCase = torch.tensor(
[
[[-4.89_60, -2.36_88, -3.03_55], [-2.84_78, -0.98_36, -1.74_18], [-2.94_49, -1.33_32, -2.14_56]],
[[-5.80_81, -3.41_24, -4.10_06], [-3.85_61, -2.20_81, -3.03_23], [-3.83_65, -2.46_01, -3.36_69]],
[[-0.03_09, 3.98_68, 4.05_40], [2.96_40, 4.68_77, 4.99_76], [3.20_81, 4.76_90, 4.99_42]],
] , device=UpperCamelCase__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
UpperCAmelCase = model.to(UpperCamelCase__ )
UpperCAmelCase = BeitImageProcessor(do_resize=UpperCamelCase__ , size=6_40 , do_center_crop=UpperCamelCase__ )
UpperCAmelCase = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
UpperCAmelCase = Image.open(ds[0]["file"] )
UpperCAmelCase = image_processor(images=UpperCamelCase__ , return_tensors="pt" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
UpperCAmelCase = model(**UpperCamelCase__ )
UpperCAmelCase = outputs.logits.detach().cpu()
UpperCAmelCase = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(5_00, 3_00)] )
UpperCAmelCase = torch.Size((5_00, 3_00) )
self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
UpperCAmelCase = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ )
UpperCAmelCase = torch.Size((1_60, 1_60) )
self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
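# A minimal sketch of what post_process_semantic_segmentation does with the raw
# logits tested above: bilinear-upsample to the requested size, then take the
# per-pixel argmax. All names below are illustrative, not the library API.
import torch
def sketch_post_process(logits: torch.Tensor, target_size=None) -> torch.Tensor:
    if target_size is not None:
        # logits: (batch, num_labels, h, w) -> (batch, num_labels, *target_size)
        logits = torch.nn.functional.interpolate(
            logits, size=target_size, mode="bilinear", align_corners=False
        )
    return logits.argmax(dim=1)  # one class index per pixel
# sketch_post_process(torch.randn(1, 150, 160, 160), (500, 300)).shape -> (1, 500, 300)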
| 457 | 0 |
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
__UpperCamelCase = logging.get_logger(__name__)
# General docstring
__UpperCamelCase = 'MobileNetV1Config'
# Base docstring
__UpperCamelCase = 'google/mobilenet_v1_1.0_224'
__UpperCamelCase = [1, 1_0_2_4, 7, 7]
# Image classification docstring
__UpperCamelCase = 'google/mobilenet_v1_1.0_224'
__UpperCamelCase = 'tabby, tabby cat'
__UpperCamelCase = [
'google/mobilenet_v1_1.0_224',
'google/mobilenet_v1_0.75_192',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def UpperCamelCase_( _A :Optional[Any] , _A :str , _A :Optional[Any]=None )-> str:
UpperCamelCase__ = {}
if isinstance(_A , _A ):
UpperCamelCase__ = model.mobilenet_va
else:
UpperCamelCase__ = model
UpperCamelCase__ = "MobilenetV1/Conv2d_0/"
UpperCamelCase__ = backbone.conv_stem.convolution.weight
UpperCamelCase__ = backbone.conv_stem.normalization.bias
UpperCamelCase__ = backbone.conv_stem.normalization.weight
UpperCamelCase__ = backbone.conv_stem.normalization.running_mean
UpperCamelCase__ = backbone.conv_stem.normalization.running_var
for i in range(13 ):
UpperCamelCase__ = i + 1
UpperCamelCase__ = i * 2
UpperCamelCase__ = backbone.layer[pt_index]
UpperCamelCase__ = F'''MobilenetV1/Conv2d_{tf_index}_depthwise/'''
UpperCamelCase__ = pointer.convolution.weight
UpperCamelCase__ = pointer.normalization.bias
UpperCamelCase__ = pointer.normalization.weight
UpperCamelCase__ = pointer.normalization.running_mean
UpperCamelCase__ = pointer.normalization.running_var
UpperCamelCase__ = backbone.layer[pt_index + 1]
UpperCamelCase__ = F'''MobilenetV1/Conv2d_{tf_index}_pointwise/'''
UpperCamelCase__ = pointer.convolution.weight
UpperCamelCase__ = pointer.normalization.bias
UpperCamelCase__ = pointer.normalization.weight
UpperCamelCase__ = pointer.normalization.running_mean
UpperCamelCase__ = pointer.normalization.running_var
if isinstance(_A , _A ):
UpperCamelCase__ = "MobilenetV1/Logits/Conv2d_1c_1x1/"
UpperCamelCase__ = model.classifier.weight
UpperCamelCase__ = model.classifier.bias
return tf_to_pt_map
def UpperCamelCase_( _A :List[str] , _A :Any , _A :List[Any] )-> int:
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions." )
raise
# Load weights from TF model
UpperCamelCase__ = tf.train.list_variables(_A )
UpperCamelCase__ = {}
for name, shape in init_vars:
logger.info(F'''Loading TF weight {name} with shape {shape}''' )
UpperCamelCase__ = tf.train.load_variable(_A , _A )
UpperCamelCase__ = array
# Build TF to PyTorch weights loading map
UpperCamelCase__ = _build_tf_to_pytorch_map(_A , _A , _A )
for name, pointer in tf_to_pt_map.items():
logger.info(F'''Importing {name}''' )
if name not in tf_weights:
logger.info(F'''{name} not in tf pre-trained weights, skipping''' )
continue
UpperCamelCase__ = tf_weights[name]
if "depthwise_weights" in name:
logger.info("Transposing depthwise" )
UpperCamelCase__ = np.transpose(_A , (2, 3, 0, 1) )
elif "weights" in name:
logger.info("Transposing" )
if len(pointer.shape ) == 2: # copying into linear layer
UpperCamelCase__ = array.squeeze().transpose()
else:
UpperCamelCase__ = np.transpose(_A , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' )
logger.info(F'''Initialize PyTorch weight {name} {array.shape}''' )
UpperCamelCase__ = torch.from_numpy(_A )
tf_weights.pop(_A , _A )
tf_weights.pop(name + "/RMSProp" , _A )
tf_weights.pop(name + "/RMSProp_1" , _A )
tf_weights.pop(name + "/ExponentialMovingAverage" , _A )
logger.info(F'''Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}''' )
return model
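# A small stand-alone check (synthetic shapes, not weights from any checkpoint)
# of the two transposes used above: TF stores dense conv kernels as
# (H, W, in, out) while PyTorch expects (out, in, H, W); TF depthwise kernels
# are (H, W, in, multiplier) and map to (in, multiplier, H, W).
import numpy as np
tf_kernel = np.zeros((3, 3, 16, 32))              # H, W, in, out
assert np.transpose(tf_kernel, (3, 2, 0, 1)).shape == (32, 16, 3, 3)
tf_depthwise = np.zeros((3, 3, 16, 1))            # H, W, in, multiplier
assert np.transpose(tf_depthwise, (2, 3, 0, 1)).shape == (16, 1, 3, 3)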
def UpperCamelCase_( _A :torch.Tensor , _A :nn.Convad )-> torch.Tensor:
UpperCamelCase__, UpperCamelCase__ = features.shape[-2:]
UpperCamelCase__, UpperCamelCase__ = conv_layer.stride
UpperCamelCase__, UpperCamelCase__ = conv_layer.kernel_size
if in_height % stride_height == 0:
UpperCamelCase__ = max(kernel_height - stride_height , 0 )
else:
UpperCamelCase__ = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
UpperCamelCase__ = max(kernel_width - stride_width , 0 )
else:
UpperCamelCase__ = max(kernel_width - (in_width % stride_width) , 0 )
UpperCamelCase__ = pad_along_width // 2
UpperCamelCase__ = pad_along_width - pad_left
UpperCamelCase__ = pad_along_height // 2
UpperCamelCase__ = pad_along_height - pad_top
UpperCamelCase__ = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(_A , _A , "constant" , 0.0 )
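# A worked check of the "SAME" padding arithmetic above (inputs chosen for
# illustration). TensorFlow places the extra pixel on the bottom/right when the
# total padding is odd, which a symmetric PyTorch `padding=` cannot express.
def same_pad_1d(size: int, stride: int, kernel: int) -> tuple:
    if size % stride == 0:
        total = max(kernel - stride, 0)
    else:
        total = max(kernel - (size % stride), 0)
    return total // 2, total - total // 2         # (before, after)
assert same_pad_1d(7, 2, 3) == (1, 1)             # odd input: symmetric padding
assert same_pad_1d(8, 2, 3) == (0, 1)             # even input: extra pixel after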
class lowerCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case = 1 , snake_case = 1 , snake_case = False , snake_case = True , snake_case = True , ):
'''simple docstring'''
super().__init__()
UpperCamelCase__ = config
if in_channels % groups != 0:
raise ValueError(F'''Input channels ({in_channels}) are not divisible by {groups} groups.''' )
if out_channels % groups != 0:
raise ValueError(F'''Output channels ({out_channels}) are not divisible by {groups} groups.''' )
UpperCamelCase__ = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
UpperCamelCase__ = nn.Convad(
in_channels=snake_case , out_channels=snake_case , kernel_size=snake_case , stride=snake_case , padding=snake_case , groups=snake_case , bias=snake_case , padding_mode="zeros" , )
if use_normalization:
UpperCamelCase__ = nn.BatchNormad(
num_features=snake_case , eps=config.layer_norm_eps , momentum=0.9997 , affine=snake_case , track_running_stats=snake_case , )
else:
UpperCamelCase__ = None
if use_activation:
if isinstance(snake_case , snake_case ):
UpperCamelCase__ = ACTaFN[use_activation]
elif isinstance(config.hidden_act , snake_case ):
UpperCamelCase__ = ACTaFN[config.hidden_act]
else:
UpperCamelCase__ = config.hidden_act
else:
UpperCamelCase__ = None
def snake_case__ ( self , snake_case ):
'''simple docstring'''
if self.config.tf_padding:
UpperCamelCase__ = apply_tf_padding(snake_case , self.convolution )
UpperCamelCase__ = self.convolution(snake_case )
if self.normalization is not None:
UpperCamelCase__ = self.normalization(snake_case )
if self.activation is not None:
UpperCamelCase__ = self.activation(snake_case )
return features
class lowerCamelCase__ ( UpperCAmelCase ):
"""simple docstring"""
_UpperCamelCase : Tuple = MobileNetVaConfig
_UpperCamelCase : Union[str, Any] = load_tf_weights_in_mobilenet_va
_UpperCamelCase : Tuple = 'mobilenet_v1'
_UpperCamelCase : Union[str, Any] = 'pixel_values'
_UpperCamelCase : Any = False
def snake_case__ ( self , snake_case ):
'''simple docstring'''
if isinstance(snake_case , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(snake_case , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
__UpperCamelCase = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
__UpperCamelCase = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
'The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.' , UpperCAmelCase , )
class lowerCamelCase__ ( UpperCAmelCase ):
"""simple docstring"""
def __init__( self , snake_case , snake_case = True ):
'''simple docstring'''
super().__init__(snake_case )
UpperCamelCase__ = config
UpperCamelCase__ = 32
UpperCamelCase__ = max(int(depth * config.depth_multiplier ) , config.min_depth )
UpperCamelCase__ = MobileNetVaConvLayer(
snake_case , in_channels=config.num_channels , out_channels=snake_case , kernel_size=3 , stride=2 , )
UpperCamelCase__ = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
UpperCamelCase__ = nn.ModuleList()
for i in range(13 ):
UpperCamelCase__ = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
UpperCamelCase__ = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
snake_case , in_channels=snake_case , out_channels=snake_case , kernel_size=3 , stride=strides[i] , groups=snake_case , ) )
self.layer.append(
MobileNetVaConvLayer(
snake_case , in_channels=snake_case , out_channels=snake_case , kernel_size=1 , ) )
UpperCamelCase__ = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def snake_case__ ( self , snake_case ):
'''simple docstring'''
raise NotImplementedError
@add_start_docstrings_to_model_forward(snake_case )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def snake_case__ ( self , snake_case = None , snake_case = None , snake_case = None , ):
'''simple docstring'''
UpperCamelCase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values" )
UpperCamelCase__ = self.conv_stem(snake_case )
UpperCamelCase__ = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
UpperCamelCase__ = layer_module(snake_case )
if output_hidden_states:
UpperCamelCase__ = all_hidden_states + (hidden_states,)
UpperCamelCase__ = hidden_states
if self.pooler is not None:
UpperCamelCase__ = torch.flatten(self.pooler(snake_case ) , start_dim=1 )
else:
UpperCamelCase__ = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=snake_case , pooler_output=snake_case , hidden_states=snake_case , )
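# A minimal sketch (names assumed) of the depthwise-separable pair the loop
# above appends 13 times: a 3x3 conv with groups == channels followed by a 1x1
# pointwise conv. This factorisation is the core parameter saving of MobileNetV1.
import torch.nn as nn
def separable_block(in_ch: int, out_ch: int, stride: int = 1) -> nn.Sequential:
    return nn.Sequential(
        nn.Conv2d(in_ch, in_ch, 3, stride=stride, padding=1, groups=in_ch, bias=False),
        nn.BatchNorm2d(in_ch),
        nn.ReLU6(inplace=True),
        nn.Conv2d(in_ch, out_ch, 1, bias=False),
        nn.BatchNorm2d(out_ch),
        nn.ReLU6(inplace=True),
    )
# For 64 -> 128 channels with a 3x3 kernel (ignoring batch norm):
#   dense conv:      3*3*64*128          = 73,728 weights
#   separable pair:  3*3*64 + 1*1*64*128 =  8,768 weights (~8.4x fewer)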
@add_start_docstrings(
'\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , UpperCAmelCase , )
class lowerCamelCase__ ( UpperCAmelCase ):
"""simple docstring"""
def __init__( self , snake_case ):
'''simple docstring'''
super().__init__(snake_case )
UpperCamelCase__ = config.num_labels
UpperCamelCase__ = MobileNetVaModel(snake_case )
UpperCamelCase__ = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
UpperCamelCase__ = nn.Dropout(config.classifier_dropout_prob , inplace=snake_case )
UpperCamelCase__ = nn.Linear(snake_case , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(snake_case )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def snake_case__ ( self , snake_case = None , snake_case = None , snake_case = None , snake_case = None , ):
'''simple docstring'''
UpperCamelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase__ = self.mobilenet_va(snake_case , output_hidden_states=snake_case , return_dict=snake_case )
UpperCamelCase__ = outputs.pooler_output if return_dict else outputs[1]
UpperCamelCase__ = self.classifier(self.dropout(snake_case ) )
UpperCamelCase__ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
UpperCamelCase__ = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
UpperCamelCase__ = "single_label_classification"
else:
UpperCamelCase__ = "multi_label_classification"
if self.config.problem_type == "regression":
UpperCamelCase__ = MSELoss()
if self.num_labels == 1:
UpperCamelCase__ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
UpperCamelCase__ = loss_fct(snake_case , snake_case )
elif self.config.problem_type == "single_label_classification":
UpperCamelCase__ = CrossEntropyLoss()
UpperCamelCase__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
UpperCamelCase__ = BCEWithLogitsLoss()
UpperCamelCase__ = loss_fct(snake_case , snake_case )
if not return_dict:
UpperCamelCase__ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=snake_case , logits=snake_case , hidden_states=outputs.hidden_states , )
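# A compact illustrative restatement (not library code) of the problem_type
# dispatch above: regression uses MSE, single-label classification uses
# cross-entropy on integer class indices, and multi-label uses BCE-with-logits
# on float multi-hot targets.
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
def sketch_loss(logits: torch.Tensor, labels: torch.Tensor, problem_type: str) -> torch.Tensor:
    if problem_type == "regression":
        return MSELoss()(logits.squeeze(), labels.squeeze().float())
    if problem_type == "single_label_classification":
        return CrossEntropyLoss()(logits.view(-1, logits.size(-1)), labels.view(-1))
    return BCEWithLogitsLoss()(logits, labels.float())  # multi_label_classification
# sketch_loss(torch.randn(4, 10), torch.randint(0, 10, (4,)), "single_label_classification")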
| 551 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
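# The guarded-import pattern above in miniature (illustrative only, not the real
# diffusers machinery): when a required backend is missing, bind a dummy
# placeholder that raises a helpful error at instantiation time instead of
# failing at import time.
class _DummyPipeline:
    def __init__(self, *args, **kwargs):
        raise ImportError("This pipeline requires `torch` and `transformers` to be installed.")
try:
    import torch  # noqa: F401
    import transformers  # noqa: F401
    class DemoPipeline:  # stands in for the real KandinskyPipeline
        pass
except ImportError:
    DemoPipeline = _DummyPipeline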
| 551 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : int = 384
_lowerCamelCase : Union[str, Any] = 7
if "tiny" in model_name:
_lowerCamelCase : Optional[Any] = 96
_lowerCamelCase : Dict = (2, 2, 6, 2)
_lowerCamelCase : Dict = (3, 6, 12, 24)
elif "small" in model_name:
_lowerCamelCase : Union[str, Any] = 96
_lowerCamelCase : Dict = (2, 2, 18, 2)
_lowerCamelCase : Dict = (3, 6, 12, 24)
elif "base" in model_name:
_lowerCamelCase : Any = 128
_lowerCamelCase : Optional[Any] = (2, 2, 18, 2)
_lowerCamelCase : str = (4, 8, 16, 32)
_lowerCamelCase : List[Any] = 12
_lowerCamelCase : Any = 512
elif "large" in model_name:
_lowerCamelCase : List[str] = 192
_lowerCamelCase : List[Any] = (2, 2, 18, 2)
_lowerCamelCase : Union[str, Any] = (6, 12, 24, 48)
_lowerCamelCase : int = 12
_lowerCamelCase : int = 768
# set label information
_lowerCamelCase : List[Any] = 150
_lowerCamelCase : List[str] = 'huggingface/label-files'
_lowerCamelCase : str = 'ade20k-id2label.json'
_lowerCamelCase : List[Any] = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type="""dataset""" ) , """r""" ) )
_lowerCamelCase : Optional[Any] = {int(_lowercase ): v for k, v in idalabel.items()}
_lowerCamelCase : str = {v: k for k, v in idalabel.items()}
_lowerCamelCase : Optional[Any] = SwinConfig(
embed_dim=_lowercase , depths=_lowercase , num_heads=_lowercase , window_size=_lowercase , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
_lowerCamelCase : Tuple = UperNetConfig(
backbone_config=_lowercase , auxiliary_in_channels=_lowercase , num_labels=_lowercase , idalabel=_lowercase , labelaid=_lowercase , )
return config
def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : Any = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.stages.{i}.downsample.reduction.weight""", F"""backbone.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.stages.{i}.downsample.norm.weight""", F"""backbone.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.stages.{i}.downsample.norm.bias""", F"""backbone.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def A__ ( __A , __A , __A ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = dct.pop(_lowercase )
_lowerCamelCase : List[str] = val
def A__ ( __A , __A ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_lowerCamelCase : Dict = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_lowerCamelCase : Optional[int] = state_dict.pop(F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight""" )
_lowerCamelCase : str = state_dict.pop(F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Dict = in_proj_weight[:dim, :]
_lowerCamelCase : List[str] = in_proj_bias[: dim]
_lowerCamelCase : Dict = in_proj_weight[
dim : dim * 2, :
]
_lowerCamelCase : List[Any] = in_proj_bias[
dim : dim * 2
]
_lowerCamelCase : Optional[int] = in_proj_weight[
-dim :, :
]
_lowerCamelCase : int = in_proj_bias[-dim :]
# fmt: on
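# A toy demonstration (synthetic sizes) of the slicing above: the original
# checkpoint stores one fused attention projection of shape (3*dim, dim), which
# is cut into separate query, key and value matrices of shape (dim, dim).
import torch
dim = 4
in_proj_weight = torch.randn(3 * dim, dim)
q = in_proj_weight[:dim, :]
k = in_proj_weight[dim : 2 * dim, :]
v = in_proj_weight[-dim:, :]
assert torch.equal(torch.cat([q, k, v], dim=0), in_proj_weight)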
def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : Dict = x.shape
_lowerCamelCase : Dict = x.reshape(_lowercase , 4 , in_channel // 4 )
_lowerCamelCase : Optional[Any] = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(_lowercase , _lowercase )
return x
def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = x.shape
_lowerCamelCase : Optional[Any] = x.reshape(_lowercase , in_channel // 4 , 4 )
_lowerCamelCase : str = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(_lowercase , _lowercase )
return x
def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : str = x.shape[0]
_lowerCamelCase : List[Any] = x.reshape(4 , in_channel // 4 )
_lowerCamelCase : Union[str, Any] = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(_lowercase )
return x
def A__ ( __A ):
'''simple docstring'''
_lowerCamelCase : Dict = x.shape[0]
_lowerCamelCase : Any = x.reshape(in_channel // 4 , 4 )
_lowerCamelCase : List[str] = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(_lowercase )
return x
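# A tiny illustration (synthetic tensor) of the group permutation the four
# helpers above apply. Patch merging concatenates 4 neighbouring patches; the
# source checkpoint orders the groups (0, 1, 2, 3) while the target layout
# expects (0, 2, 1, 3), so the middle two groups are swapped.
import torch
x = torch.arange(8.0)                       # groups: [0 0 | 1 1 | 2 2 | 3 3]
reordered = x.reshape(4, 2)[[0, 2, 1, 3], :].reshape(8)
assert reordered.tolist() == [0.0, 1.0, 4.0, 5.0, 2.0, 3.0, 6.0, 7.0]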
def A__ ( __A , __A , __A ):
'''simple docstring'''
_lowerCamelCase : Dict = {
'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth',
'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth',
'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth',
'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth',
}
_lowerCamelCase : Dict = model_name_to_url[model_name]
_lowerCamelCase : Any = torch.hub.load_state_dict_from_url(_lowercase , map_location="""cpu""" , file_name=_lowercase )[
'state_dict'
]
for name, param in state_dict.items():
print(_lowercase , param.shape )
_lowerCamelCase : str = get_upernet_config(_lowercase )
_lowerCamelCase : List[Any] = UperNetForSemanticSegmentation(_lowercase )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
_lowerCamelCase : Tuple = state_dict.pop(_lowercase )
if "bn" in key:
_lowerCamelCase : str = key.replace("""bn""" , """batch_norm""" )
_lowerCamelCase : Union[str, Any] = val
# rename keys
_lowerCamelCase : str = create_rename_keys(_lowercase )
for src, dest in rename_keys:
rename_key(_lowercase , _lowercase , _lowercase )
read_in_q_k_v(_lowercase , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
_lowerCamelCase : Any = reverse_correct_unfold_reduction_order(_lowercase )
if "norm" in key:
_lowerCamelCase : Tuple = reverse_correct_unfold_norm_order(_lowercase )
model.load_state_dict(_lowercase )
# verify on image
_lowerCamelCase : List[str] = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
_lowerCamelCase : List[Any] = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ).convert("""RGB""" )
_lowerCamelCase : str = SegformerImageProcessor()
_lowerCamelCase : Optional[int] = processor(_lowercase , return_tensors="""pt""" ).pixel_values
with torch.no_grad():
_lowerCamelCase : str = model(_lowercase )
_lowerCamelCase : Tuple = outputs.logits
print(logits.shape )
print("""First values of logits:""" , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
_lowerCamelCase : Dict = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] )
elif model_name == "upernet-swin-small":
_lowerCamelCase : Union[str, Any] = torch.tensor(
[[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] )
elif model_name == "upernet-swin-base":
_lowerCamelCase : int = torch.tensor(
[[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] )
elif model_name == "upernet-swin-large":
_lowerCamelCase : Any = torch.tensor(
[[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , _lowercase , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
print(F"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(_lowercase )
if push_to_hub:
print(F"""Pushing model and processor for {model_name} to hub""" )
model.push_to_hub(F"""openmmlab/{model_name}""" )
processor.push_to_hub(F"""openmmlab/{model_name}""" )
if __name__ == "__main__":
lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-swin-tiny",
type=str,
choices=[F"""upernet-swin-{size}""" for size in ["tiny", "small", "base", "large"]],
help="Name of the Swin + UperNet model you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
lowerCAmelCase : Optional[Any] = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 704 |
lowerCAmelCase : Tuple =0 # The first color of the flag.
lowerCAmelCase : Union[str, Any] =1 # The second color of the flag.
lowerCAmelCase : Any =2 # The third color of the flag.
lowerCAmelCase : List[str] =(red, white, blue)
def A__ ( __A ):
'''simple docstring'''
if not sequence:
return []
if len(__A ) == 1:
return list(__A )
_lowerCamelCase : int = 0
_lowerCamelCase : Dict = len(__A ) - 1
_lowerCamelCase : str = 0
while mid <= high:
if sequence[mid] == colors[0]:
_lowerCamelCase , _lowerCamelCase : Tuple = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
_lowerCamelCase , _lowerCamelCase : str = sequence[high], sequence[mid]
high -= 1
else:
_lowerCamelCase : int = F"""The elements inside the sequence must contains only {colors} values"""
raise ValueError(__A )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase : List[str] =input("Enter numbers separated by commas:\n").strip()
lowerCAmelCase : Dict =[int(item.strip()) for item in user_input.split(",")]
print(F"""{dutch_national_flag_sort(unsorted)}""")
| 15 | 0 |
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class __UpperCAmelCase ( tf.keras.optimizers.schedules.LearningRateSchedule ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1.0 , lowerCAmelCase_ = None , ):
"""simple docstring"""
super().__init__()
_snake_case = initial_learning_rate
_snake_case = warmup_steps
_snake_case = power
_snake_case = decay_schedule_fn
_snake_case = name
def __call__( self , lowerCAmelCase_ ):
"""simple docstring"""
with tf.name_scope(self.name or 'WarmUp' ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
_snake_case = tf.cast(lowerCAmelCase_ , tf.floataa )
_snake_case = tf.cast(self.warmup_steps , tf.floataa )
_snake_case = global_step_float / warmup_steps_float
_snake_case = self.initial_learning_rate * tf.math.pow(lowerCAmelCase_ , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=lowerCAmelCase_ , )
def lowerCamelCase ( self ):
"""simple docstring"""
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
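# A worked example (illustrative numbers) of the warmup formula above with
# power = 1: before `warmup_steps` the rate follows the linear ramp
# init_lr * (step / warmup_steps); at the boundary the wrapped decay schedule
# takes over via tf.cond.
init_lr, warmup_steps = 5e-5, 100
for step in (1, 50, 100):
    print(step, init_lr * (step / warmup_steps) ** 1.0)
# step 1 -> ~5e-07, step 50 -> ~2.5e-05, step 100 -> 5e-05 (hand-off to decay)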
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A , __A = 0.0 , __A = 0.9 , __A = 0.9_9_9 , __A = 1e-8 , __A = None , __A = None , __A = 0.0 , __A = 1.0 , __A = None , ) -> List[str]:
_snake_case = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=__A , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=__A , )
if num_warmup_steps:
_snake_case = WarmUp(
initial_learning_rate=__A , decay_schedule_fn=__A , warmup_steps=__A , )
if weight_decay_rate > 0.0:
_snake_case = AdamWeightDecay(
learning_rate=__A , weight_decay_rate=__A , beta_a=__A , beta_a=__A , epsilon=__A , clipnorm=__A , global_clipnorm=__A , exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] , include_in_weight_decay=__A , )
else:
_snake_case = tf.keras.optimizers.Adam(
learning_rate=__A , beta_a=__A , beta_a=__A , epsilon=__A , clipnorm=__A , global_clipnorm=__A , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
class __UpperCAmelCase ( _lowerCamelCase ):
def __init__( self , lowerCAmelCase_ = 0.001 , lowerCAmelCase_ = 0.9 , lowerCAmelCase_ = 0.999 , lowerCAmelCase_ = 1E-7 , lowerCAmelCase_ = False , lowerCAmelCase_ = 0.0 , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = "AdamWeightDecay" , **lowerCAmelCase_ , ):
"""simple docstring"""
super().__init__(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = weight_decay_rate
_snake_case = include_in_weight_decay
_snake_case = exclude_from_weight_decay
@classmethod
def lowerCamelCase ( cls , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = {'WarmUp': WarmUp}
return super(lowerCAmelCase_ , cls ).from_config(lowerCAmelCase_ , custom_objects=lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
super(lowerCAmelCase_ , self )._prepare_local(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = tf.constant(
self.weight_decay_rate , name='adam_weight_decay_rate' )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , )
return tf.no_op()
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , **lowerCAmelCase_ ):
"""simple docstring"""
_snake_case , _snake_case = list(zip(*lowerCAmelCase_ ) )
return super(lowerCAmelCase_ , self ).apply_gradients(zip(lowerCAmelCase_ , lowerCAmelCase_ ) , name=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
_snake_case = apply_state or {}
_snake_case = apply_state.get((var_device, var_dtype) )
if coefficients is None:
_snake_case = self._fallback_apply_state(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None ):
"""simple docstring"""
_snake_case , _snake_case = self._get_lr(var.device , var.dtype.base_dtype , lowerCAmelCase_ )
_snake_case = self._decay_weights_op(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
with tf.control_dependencies([decay] ):
return super(lowerCAmelCase_ , self )._resource_apply_dense(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None ):
"""simple docstring"""
_snake_case , _snake_case = self._get_lr(var.device , var.dtype.base_dtype , lowerCAmelCase_ )
_snake_case = self._decay_weights_op(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
with tf.control_dependencies([decay] ):
return super(lowerCAmelCase_ , self )._resource_apply_sparse(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = super().get_config()
config.update({'weight_decay_rate': self.weight_decay_rate} )
return config
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(lowerCAmelCase_ , lowerCAmelCase_ ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(lowerCAmelCase_ , lowerCAmelCase_ ) is not None:
return False
return True
class __UpperCAmelCase ( _lowerCamelCase ):
def __init__( self ):
"""simple docstring"""
_snake_case = []
_snake_case = None
@property
def lowerCamelCase ( self ):
"""simple docstring"""
if self._accum_steps is None:
_snake_case = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=lowerCAmelCase_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def lowerCamelCase ( self ):
"""simple docstring"""
if not self._gradients:
raise ValueError('The accumulator should be called first to initialize the gradients' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self , lowerCAmelCase_ ):
"""simple docstring"""
if not self._gradients:
_snake_case = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(lowerCAmelCase_ ) , trainable=lowerCAmelCase_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(lowerCAmelCase_ ) != len(self._gradients ):
raise ValueError(F'Expected {len(self._gradients )} gradients, but got {len(lowerCAmelCase_ )}' )
for accum_gradient, gradient in zip(self._gradients , lowerCAmelCase_ ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(lowerCAmelCase_ )
self._accum_steps.assign_add(1 )
def lowerCamelCase ( self ):
"""simple docstring"""
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(lowerCAmelCase_ ) )
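# A self-contained usage sketch of gradient accumulation (plain tf.Variable in
# place of the class above; all names here are assumed): sum per-micro-batch
# gradients and apply a single averaged optimizer step every `accum_steps`
# batches, then reset the buffer.
import tensorflow as tf
accum_steps = 4
var = tf.Variable(1.0)
opt = tf.keras.optimizers.SGD(learning_rate=0.1)
accumulated = tf.Variable(0.0, trainable=False)
for i in range(8):
    with tf.GradientTape() as tape:
        loss = (var - 3.0) ** 2
    (grad,) = tape.gradient(loss, [var])
    accumulated.assign_add(grad)
    if (i + 1) % accum_steps == 0:
        opt.apply_gradients([(accumulated / accum_steps, var)])
        accumulated.assign(0.0)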
| 495 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A , __A ) -> bool:
# 1. Validate that path exists between current and next vertices
if graph[path[curr_ind - 1]][next_ver] == 0:
return False
# 2. Validate that next vertex is not already in path
return not any(vertex == next_ver for vertex in path )
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A ) -> bool:
# Base Case
if curr_ind == len(__A ):
# return whether path exists between current and starting vertices
return graph[path[curr_ind - 1]][path[0]] == 1
# Recursive Step
for next_ver in range(0 , len(__A ) ):
if valid_connection(__A , __A , __A , __A ):
# Insert current vertex into path as next transition
_snake_case = next_ver
# Validate created path
if util_hamilton_cycle(__A , __A , curr_ind + 1 ):
return True
# Backtrack
_snake_case = -1
return False
def SCREAMING_SNAKE_CASE__ ( __A , __A = 0 ) -> list[int]:
_snake_case = [-1] * (len(__A ) + 1)
# initialize start and end of path with starting index
_snake_case = _snake_case = start_index
# evaluate and if we find answer return path either return empty array
return path if util_hamilton_cycle(__A , __A , 1 ) else []
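# A self-contained check (editor-supplied graph; in the un-obfuscated source the
# entry point is hamilton_cycle(graph, start_index=0)) of the backtracking idea
# above, run on the 4-cycle 0-1-2-3-0.
def hamilton_cycle_demo():
    graph = [
        [0, 1, 0, 1],
        [1, 0, 1, 0],
        [0, 1, 0, 1],
        [1, 0, 1, 0],
    ]
    path = [-1] * (len(graph) + 1)
    path[0] = path[-1] = 0            # the tour must start and end at vertex 0
    def search(curr):
        if curr == len(graph):        # all vertices placed: check the closing edge
            return graph[path[curr - 1]][path[0]] == 1
        for nxt in range(len(graph)):
            if graph[path[curr - 1]][nxt] == 1 and nxt not in path[:curr]:
                path[curr] = nxt
                if search(curr + 1):
                    return True
                path[curr] = -1       # backtrack
        return False
    return path if search(1) else []
assert hamilton_cycle_demo() == [0, 1, 2, 3, 0]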
| 495 | 1 |
'''simple docstring'''
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
_UpperCAmelCase : Tuple = logging.getLogger(__name__)
_UpperCAmelCase : Optional[Any] = """pytorch_model.bin"""
@dataclasses.dataclass
class a__ :
"""simple docstring"""
__UpperCamelCase : str = dataclasses.field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'} )
__UpperCamelCase : Optional[str] = dataclasses.field(
default=__A , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'} , )
@dataclasses.dataclass
class a__ :
"""simple docstring"""
__UpperCamelCase : str = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'} )
__UpperCamelCase : str = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'} )
__UpperCamelCase : Optional[str] = dataclasses.field(
default=__A , metadata={'help': 'A csv or a json file containing the validation data.'} )
__UpperCamelCase : Optional[str] = dataclasses.field(
default=__A , metadata={'help': 'The name of the task to train on.'} , )
__UpperCamelCase : Optional[List[str]] = dataclasses.field(
default=__A , metadata={'help': 'The list of labels for the task.'} )
@dataclasses.dataclass
class a__ :
"""simple docstring"""
__UpperCamelCase : str = dataclasses.field(
metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'} )
__UpperCamelCase : Optional[str] = dataclasses.field(
default='accuracy' , metadata={'help': 'The evaluation metric used for the task.'} )
__UpperCamelCase : Optional[str] = dataclasses.field(
default='no' , metadata={
'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
} , )
__UpperCamelCase : Optional[int] = dataclasses.field(
default=10 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
__UpperCamelCase : Optional[float] = dataclasses.field(
default=0.0 , metadata={
'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.'
} , )
__UpperCamelCase : Optional[bool] = dataclasses.field(
default=__A , metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'} , )
__UpperCamelCase : Optional[bool] = dataclasses.field(
default=__A , metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'} , )
__UpperCamelCase : Optional[bool] = dataclasses.field(
default=__A , metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'} , )
__UpperCamelCase : Optional[float] = dataclasses.field(
default=0.0 , metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'} , )
__UpperCamelCase : Optional[int] = dataclasses.field(
default=100 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
__UpperCamelCase : Optional[int] = dataclasses.field(
default=__A , metadata={'help': 'Random seed for initialization.'} , )
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = datasets.concatenate_datasets([infer_input, infer_output], axis=1)
if args.do_filter_by_confidence:
__lowerCAmelCase = dataset.filter(lambda lowerCamelCase: example["probability"] > args.confidence_threshold)
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
__lowerCAmelCase = int(eval_result * len(lowerCamelCase))
print(lowerCamelCase)
__lowerCAmelCase = dataset.sort('''probability''', reverse=lowerCamelCase)
__lowerCAmelCase = dataset.select(range(lowerCamelCase))
__lowerCAmelCase = dataset.remove_columns(['''label''', '''probability'''])
__lowerCAmelCase = dataset.rename_column('''prediction''', '''label''')
__lowerCAmelCase = dataset.map(lambda lowerCamelCase: {"label": idalabel[example["label"]]})
__lowerCAmelCase = dataset.shuffle(seed=args.seed)
__lowerCAmelCase = os.path.join(lowerCamelCase, F"""train_pseudo.{args.data_file_extension}""")
if args.data_file_extension == "csv":
dataset.to_csv(lowerCamelCase, index=lowerCamelCase)
else:
dataset.to_json(lowerCamelCase)
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase):
__lowerCAmelCase = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, )
logger.info(accelerator.state)
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
__lowerCAmelCase = STModelArguments(model_name_or_path=lowerCamelCase)
__lowerCAmelCase = STDataArguments(train_file=lowerCamelCase, infer_file=lowerCamelCase)
__lowerCAmelCase = STTrainingArguments(output_dir=lowerCamelCase)
__lowerCAmelCase = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(lowerCamelCase).items():
setattr(lowerCamelCase, lowerCamelCase, lowerCamelCase)
for key, value in kwargs.items():
if hasattr(lowerCamelCase, lowerCamelCase):
setattr(lowerCamelCase, lowerCamelCase, lowerCamelCase)
# Sanity checks
__lowerCAmelCase = {}
__lowerCAmelCase = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
__lowerCAmelCase = args.train_file
__lowerCAmelCase = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
__lowerCAmelCase = args.eval_file
for key in data_files:
__lowerCAmelCase = data_files[key].split('''.''')[-1]
assert extension in ["csv", "json"], F"""`{key}_file` should be a csv or a json file."""
if args.data_file_extension is None:
__lowerCAmelCase = extension
else:
assert extension == args.data_file_extension, F"""`{key}_file` should be a {args.data_file_extension} file`."""
assert (
args.eval_metric in datasets.list_metrics()
), F"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
logger.info("Creating the initial data directory for self-training...")
data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
initial_data_dir = data_dir_format(0)
if accelerator.is_main_process:
    if args.output_dir is not None:
        os.makedirs(args.output_dir, exist_ok=True)
        os.makedirs(initial_data_dir, exist_ok=True)
accelerator.wait_for_everyone()
best_iteration = None
best_eval_result = None
early_stopping_patience_counter = 0
should_training_stop = False
# Show the progress bar
progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)
# Self-train
for iteration in range(0, int(args.max_selftrain_iterations)):
    current_data_dir = data_dir_format(iteration)
    assert os.path.exists(current_data_dir)
    # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
    # iteration > 0
    current_output_dir = os.path.join(current_data_dir, "stage-1")
    arguments_dict = {
        "accelerator": accelerator,
        "model_name_or_path": args.model_name_or_path,
        "cache_dir": args.cache_dir,
        "do_train": True,
        "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
        "do_eval": True if args.eval_file is not None else False,
        "eval_file": data_files["eval"],
        "do_predict": True,
        "infer_file": data_files["infer"],
        "task_name": args.task_name,
        "label_list": args.label_list,
        "output_dir": current_output_dir,
        "eval_metric": args.eval_metric,
        "evaluation_strategy": args.evaluation_strategy,
        "early_stopping_patience": args.early_stopping_patience,
        "early_stopping_threshold": args.early_stopping_threshold,
        "seed": args.seed,
    }
    # Add additional training arguments
    for key, value in kwargs.items():
        if key not in arguments_dict and not hasattr(args, key):
            arguments_dict.update({key: value})
    model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", "pytorch_model.bin")
    if os.path.exists(model_bin_file_path):
        logger.info(
            "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
            model_bin_file_path,
            iteration,
        )
    else:
        logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
        finetune(**arguments_dict)
        accelerator.wait_for_everyone()
        assert os.path.exists(model_bin_file_path)
        logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)
    if iteration > 0 and args.finetune_on_labeled_data:
        # Stage 2 (optional): fine-tuning on the original labeled data
        model_path = os.path.join(current_output_dir, "best-checkpoint")
        current_output_dir = os.path.join(current_data_dir, "stage-2")
        # Update arguments_dict
        arguments_dict["model_name_or_path"] = model_path
        arguments_dict["train_file"] = data_files["train"]
        arguments_dict["output_dir"] = current_output_dir
        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", "pytorch_model.bin")
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)
    new_iteration = iteration
    next_data_dir = data_dir_format(iteration + 1)
    config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
    id2label = config.id2label
    eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
    test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
    assert os.path.exists(eval_results_file)
    with open(eval_results_file, "r") as f:
        eval_result = float(json.load(f)[args.eval_metric])
    infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
    assert os.path.exists(infer_output_file)
    # Loading the dataset from local csv or json files.
    infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
    infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]
    if accelerator.is_main_process:
        os.makedirs(next_data_dir, exist_ok=True)
        shutil.copy(eval_results_file, os.path.join(args.output_dir, f"eval_results_iter-{iteration}.json"))
        if os.path.exists(test_results_file):
            shutil.copy(test_results_file, os.path.join(args.output_dir, f"test_results_iter-{iteration}.json"))
        create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
    accelerator.wait_for_everyone()
    data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.evaluation_strategy != IntervalStrategy.NO.value:
        new_eval_result = eval_result
        if best_iteration is None:
            best_iteration = new_iteration
            best_eval_result = new_eval_result
        else:
            if new_eval_result - best_eval_result > args.early_stopping_threshold:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
                early_stopping_patience_counter = 0
            else:
                if new_eval_result == best_eval_result:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                early_stopping_patience_counter += 1
            if early_stopping_patience_counter >= args.early_stopping_patience:
                should_training_stop = True
    progress_bar.update(1)
    if should_training_stop:
        break
if best_iteration is not None:
    # Save the best iteration
    logger.info("Best iteration: %d", best_iteration)
    logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        shutil.copy(
            os.path.join(args.output_dir, f"eval_results_iter-{iteration}.json"),
            os.path.join(args.output_dir, "eval_results_best-iteration.json"),
        )
else:
    # Assume that the last iteration is the best
    logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
    logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        shutil.copy(
            os.path.join(args.output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"),
            os.path.join(args.output_dir, "eval_results_best-iteration.json"),
        )
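The heavy lifting between iterations happens in `create_pseudo_labeled_data`, which turns the inference output into the next round's training file. A minimal sketch of that idea, assuming each record carries a predicted label and a probability (the field names and the 0.9 cut-off are illustrative, not the actual implementation):

def select_pseudo_labels(records, threshold=0.9):
    # Keep only predictions confident enough to serve as training labels.
    # `records`, its field names, and `threshold` are assumptions made for
    # illustration; the real create_pseudo_labeled_data may differ.
    return [
        {"sentence": r["sentence"], "label": r["prediction"]}
        for r in records
        if r["probability"] >= threshold
    ]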
| 474 |
from typing import Dict, List, Optional, Tuple, Union

import torch

from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
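The guidance step above computes `uncond_eps + guidance_scale * (cond_eps - uncond_eps)`; a tiny numeric check of that formula on plain tensors:

import torch

# Classifier-free guidance pushes the prediction away from the
# unconditional estimate, scaled by guidance_scale.
cond_eps = torch.tensor([1.0, 2.0])
uncond_eps = torch.tensor([0.5, 1.0])
guidance_scale = 4.0
half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
print(half_eps)  # tensor([2.5000, 5.0000])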
| 474 | 1 |
from __future__ import annotations

from random import random


class Node:
    """Treap's node: a binary search tree by value and a heap by priority."""

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1
            )

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap in two: keys smaller than `value` go left, the rest right."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps; every key in `left` must be smaller than every key in `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good by!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
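A short usage example for the treap above, wrapped in a main guard so it does not run on import:

if __name__ == "__main__":
    # Build a treap, print the keys in sorted (inorder) order, then erase one.
    root = None
    for x in [5, 3, 8, 1]:
        root = insert(root, x)
    inorder(root)  # prints: 1,3,5,8,
    root = erase(root, 3)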
| 475 |
from typing import Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Supports the following format
            # - {"image": image, "question": question}
            # - [{"image": image, "question": question}]
            # - Generator and datasets
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
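In practice this class is reached through the `pipeline` factory rather than instantiated directly; a usage sketch (the checkpoint is one public VQA model, picked for illustration):

from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
answers = vqa(
    image="http://images.cocodataset.org/val2017/000000039769.jpg",
    question="How many cats are there?",
    top_k=2,
)
# -> a list of {"score": ..., "answer": ...} dicts, best answer first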
| 475 | 1 |
import unittest

from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import BertGenerationDecoder, BertGenerationEncoder


class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)


@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))


@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 535 |
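The `create_and_check_decoder_model_past_large_inputs` test above hinges on one comparison pattern: the last positions of a full forward pass must match a cached forward pass that sees only the new tokens. The same pattern in miniature, on plain tensors built so the check holds by construction:

import torch

# Stand-ins for the two decoder outputs compared in the test above.
output_from_no_past = torch.randn(2, 10, 32)
output_from_past = output_from_no_past[:, -3:, :].clone()  # cached path: new tokens only

random_slice_idx = torch.randint(32, (1,)).item()
assert torch.allclose(
    output_from_no_past[:, -3:, random_slice_idx],
    output_from_past[:, :, random_slice_idx],
    atol=1e-3,
)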
import string

import numpy


def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(lambda x: round(x))

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted


def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 535 | 1 |
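A quick round-trip with the `HillCipher` above, using the 2x2 key [[2, 5], [1, 6]] (determinant 7, which is coprime with 36 as `check_determinant` requires):

if __name__ == "__main__":
    hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
    ciphertext = hc.encrypt("hello")
    # process_text pads to a multiple of break_key by repeating the last
    # character, so the decryption starts with the original text.
    assert hc.decrypt(ciphertext).startswith("HELLO")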
import flax.linen as nn
import jax
import jax.numpy as jnp


class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
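A shape check for the resnet block above; Flax modules are initialized with a PRNG key and concrete inputs. The sizes here are arbitrary assumptions, chosen so the 32-group norm divides the channel count:

if __name__ == "__main__":
    block = FlaxResnetBlock2D(in_channels=32, out_channels=32)
    hidden = jnp.ones((1, 8, 8, 32))  # NHWC layout, as the resize above assumes
    temb = jnp.ones((1, 128))         # time embedding, projected to out_channels
    params = block.init(jax.random.PRNGKey(0), hidden, temb)
    out = block.apply(params, hidden, temb)
    print(out.shape)  # (1, 8, 8, 32)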
| 170 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
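A possible driver for the three estimators above; the sample count is arbitrary, and Monte Carlo error shrinks roughly like 1/sqrt(n):

if __name__ == "__main__":
    pi_estimator(10_000)
    area_under_line_estimator_check(10_000)
    pi_estimator_using_area_under_curve(10_000)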
| 170 | 1 |
from __future__ import annotations


def fractional_knapsack(
    value: list[int], weight: list[int], capacity: int
) -> tuple[float, list[float]]:
    # Sort item indices by value/weight ratio, best first.
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            # Take the whole item.
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # Take the largest fraction that still fits, then stop.
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
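A worked example for the function above: with values [60, 100, 120], weights [10, 20, 30] and capacity 50, the value/weight ratios are 6, 5 and 4, so the first two items are taken whole and two thirds of the third, for a total of 240:

if __name__ == "__main__":
    max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], capacity=50)
    print(max_value)  # 240.0
    print(fractions)  # [1, 1, 0.6666666666666666]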
| 714 |
import argparse
import os
import re


PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Return the indentation of `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks at the given indentation level, optionally
    only between `start_prompt` and `end_prompt`."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the file).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wrap a key function so that sorting ignores case and underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort.
    `key` optionally maps an object to a string."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """Sort the imports in a single import statement."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    """Sort the `_import_structure` of a given init `file`; with
    `check_only=True`, just report whether the file would change."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(sorted_block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every __init__.py under PATH_TO_TRANSFORMERS."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
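The ordering `sort_objects` implements is isort's: constants first, then classes, then functions, each group sorted while ignoring case and underscores:

assert sort_objects(["load_model", "CONFIG_NAME", "BertModel", "AutoConfig", "logging"]) == [
    "CONFIG_NAME",  # constants (all-uppercase) come first
    "AutoConfig",   # then classes (initial capital)
    "BertModel",
    "load_model",   # then functions (initial lowercase)
    "logging",
]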
| 495 | 0 |
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    # Initialize path with -1, meaning the vertices are not visited yet.
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
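Example for the backtracking search above on a 5-vertex graph; vertices are tried in ascending order, so the result is deterministic:

if __name__ == "__main__":
    graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    print(hamilton_cycle(graph))  # [0, 1, 2, 4, 3, 0]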
| 239 |
from __future__ import annotations

import string
from itertools import cycle, product
from pathlib import Path

VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data: str = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")

    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)


if __name__ == "__main__":
    print(f"{solution() = }")
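The whole search rests on XOR being its own inverse: (c ^ k) ^ k == c for any byte values. A small self-check using `try_key` from above:

if __name__ == "__main__":
    plaintext = [ord(char) for char in "god"]
    key = (ord("a"), ord("b"), ord("c"))
    ciphertext = [p ^ k for p, k in zip(plaintext, cycle(key))]
    assert try_key(ciphertext, key) == "god"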
| 239 | 1 |
import os
import random
import sys

from . import cryptomath_module as cryptoMath  # noqa: N812
from . import rabin_miller as rabinMiller  # noqa: N812


def main() -> None:
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")


if __name__ == "__main__":
    main()
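What the generated numbers are for, in textbook form: encryption is c = m^e mod n and decryption is m = c^d mod n, which Python's three-argument `pow` computes directly (note that generating 1024-bit primes takes a little while):

if __name__ == "__main__":
    (n, e), (_, d) = generate_key(1024)
    message = 42
    ciphertext = pow(message, e, n)          # c = m^e mod n
    assert pow(ciphertext, d, n) == message  # m = c^d mod n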
| 719 |
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast


@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
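Outside the test harness, the processor is normally loaded from a released checkpoint; a usage sketch (the checkpoint name is one public BLIP-2 model, picked for illustration):

from PIL import Image
from transformers import Blip2Processor

processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
inputs = processor(images=Image.new("RGB", (224, 224)), text="a photo of", return_tensors="pt")
print(list(inputs.keys()))  # ['pixel_values', 'input_ids', 'attention_mask']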
| 419 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self: Any , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Tuple=7 , _SCREAMING_SNAKE_CASE: List[str]=3 , _SCREAMING_SNAKE_CASE: List[str]=18 , _SCREAMING_SNAKE_CASE: str=30 , _SCREAMING_SNAKE_CASE: List[str]=400 , _SCREAMING_SNAKE_CASE: Dict=True , _SCREAMING_SNAKE_CASE: Union[str, Any]=None , _SCREAMING_SNAKE_CASE: Optional[int]=True , ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : int = size if size is not None else {'''height''': 18, '''width''': 18}
__lowerCAmelCase : Any = parent
__lowerCAmelCase : Optional[Any] = batch_size
__lowerCAmelCase : Optional[Any] = num_channels
__lowerCAmelCase : Any = image_size
__lowerCAmelCase : List[str] = min_resolution
__lowerCAmelCase : str = max_resolution
__lowerCAmelCase : int = do_resize
__lowerCAmelCase : Optional[Any] = size
__lowerCAmelCase : List[Any] = apply_ocr
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> List[Any]:
"""simple docstring"""
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class A__ ( __snake_case , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : str = LayoutLMvaImageProcessingTester(self)
@property
    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        """simple docstring"""
        pass

    def test_call_pil(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        """simple docstring"""
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__lowerCAmelCase : List[str] = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
__lowerCAmelCase : int = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , A__)
self.assertListEqual(encoding.boxes , A__)
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224)) | 293 |
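For context, a minimal sketch of driving the processor under test directly; the file name is hypothetical and apply_ocr=True assumes a Tesseract install:

from PIL import Image
from transformers import LayoutLMv3ImageProcessor

processor = LayoutLMv3ImageProcessor(apply_ocr=True)
image = Image.open("document.png").convert("RGB")  # hypothetical local scan
encoding = processor(image, return_tensors="pt")
print(encoding.pixel_values.shape)  # torch.Size([1, 3, 224, 224])
print(encoding.words[0][:5], encoding.boxes[0][:5])  # OCR words and their 0-1000 normalized boxes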
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
_UpperCamelCase : Union[str, Any] =logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
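A quick sketch of the three nesting shapes the helper above normalizes (PIL images count as valid images); purely illustrative:

from PIL import Image

frame = Image.new("RGB", (8, 8))
assert make_batched(frame) == [[frame]]                        # one image -> one video with one frame
assert make_batched([frame, frame]) == [[frame, frame]]        # list of frames -> one video
assert make_batched([[frame], [frame]]) == [[frame], [frame]]  # already a batch of videos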
class UpperCAmelCase__ ( __snake_case ):
__snake_case : Optional[int] = ["pixel_values"]
def __init__( self ,A__ = True ,A__ = None ,A__ = PILImageResampling.BILINEAR ,A__ = True ,A__ = None ,A__ = True ,A__ = 1 / 255 ,A__ = True ,A__ = True ,A__ = None ,A__ = None ,**A__ ,):
super().__init__(**A__ )
_A : Tuple = size if size is not None else {'''shortest_edge''': 256}
_A : str = get_size_dict(A__ ,default_to_square=A__ )
_A : str = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
_A : str = get_size_dict(A__ ,param_name='''crop_size''' )
_A : Tuple = do_resize
_A : Optional[Any] = size
_A : Optional[Any] = do_center_crop
_A : List[str] = crop_size
_A : Dict = resample
_A : Tuple = do_rescale
_A : int = rescale_factor
_A : List[str] = offset
_A : List[Any] = do_normalize
_A : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_A : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A__ ( self ,A__ ,A__ ,A__ = PILImageResampling.BILINEAR ,A__ = None ,**A__ ,):
_A : str = get_size_dict(A__ ,default_to_square=A__ )
if "shortest_edge" in size:
_A : Optional[Any] = get_resize_output_image_size(A__ ,size['''shortest_edge'''] ,default_to_square=A__ )
elif "height" in size and "width" in size:
_A : str = (size['''height'''], size['''width'''])
else:
raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(A__ ,size=A__ ,resample=A__ ,data_format=A__ ,**A__ )
def A__ ( self ,A__ ,A__ ,A__ = None ,**A__ ,):
_A : Optional[int] = get_size_dict(A__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(A__ ,size=(size['''height'''], size['''width''']) ,data_format=A__ ,**A__ )
def A__ ( self ,A__ ,A__ ,A__ = True ,A__ = None ,**A__ ,):
_A : Any = image.astype(np.floataa )
if offset:
_A : List[str] = image - (scale / 2)
return rescale(A__ ,scale=A__ ,data_format=A__ ,**A__ )
def A__ ( self ,A__ ,A__ ,A__ ,A__ = None ,**A__ ,):
return normalize(A__ ,mean=A__ ,std=A__ ,data_format=A__ ,**A__ )
def A__ ( self ,A__ ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = ChannelDimension.FIRST ,):
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
if offset and not do_rescale:
raise ValueError('''For offset, do_rescale must also be set to True.''' )
# All transformations expect numpy arrays.
_A : Dict = to_numpy_array(A__ )
if do_resize:
_A : Any = self.resize(image=A__ ,size=A__ ,resample=A__ )
if do_center_crop:
_A : str = self.center_crop(A__ ,size=A__ )
if do_rescale:
_A : Optional[int] = self.rescale(image=A__ ,scale=A__ ,offset=A__ )
if do_normalize:
_A : Dict = self.normalize(image=A__ ,mean=A__ ,std=A__ )
_A : Union[str, Any] = to_channel_dimension_format(A__ ,A__ )
return image
def A__ ( self ,A__ ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = ChannelDimension.FIRST ,**A__ ,):
_A : Optional[int] = do_resize if do_resize is not None else self.do_resize
_A : List[str] = resample if resample is not None else self.resample
_A : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
_A : Tuple = do_rescale if do_rescale is not None else self.do_rescale
_A : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
_A : List[str] = offset if offset is not None else self.offset
_A : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
_A : Optional[Any] = image_mean if image_mean is not None else self.image_mean
_A : Dict = image_std if image_std is not None else self.image_std
_A : str = size if size is not None else self.size
_A : int = get_size_dict(A__ ,default_to_square=A__ )
_A : Any = crop_size if crop_size is not None else self.crop_size
_A : Optional[int] = get_size_dict(A__ ,param_name='''crop_size''' )
if not valid_images(A__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
_A : List[str] = make_batched(A__ )
_A : Tuple = [
[
self._preprocess_image(
image=A__ ,do_resize=A__ ,size=A__ ,resample=A__ ,do_center_crop=A__ ,crop_size=A__ ,do_rescale=A__ ,rescale_factor=A__ ,offset=A__ ,do_normalize=A__ ,image_mean=A__ ,image_std=A__ ,data_format=A__ ,)
for img in video
]
for video in videos
]
_A : Optional[int] = {'''pixel_values''': videos}
return BatchFeature(data=A__ ,tensor_type=A__ )
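The per-frame path above chains resize, center crop, rescale, normalize, and channel reordering. A minimal NumPy sketch of that sequence, with illustrative constants rather than this class's exact defaults:

import numpy as np

frame = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8).astype(np.float32)
frame = frame[16:240, 16:240]     # center crop 256x256 -> 224x224
frame = frame * (1 / 255)         # rescale to [0, 1]
frame = (frame - 0.5) / 0.5       # normalize, here to [-1, 1]
frame = frame.transpose(2, 0, 1)  # HWC -> CHW (ChannelDimension.FIRST)
print(frame.shape)                # (3, 224, 224)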
| 206 | 0 |
'''simple docstring'''
def multiply(a: int, b: int) -> int:
    '''simple docstring'''
    res = 0
    while b > 0:
        if b & 1:
            res += a

        a += a
        b >>= 1

    return res


def multiply_mod(a: int, b: int, c: int) -> int:
    '''simple docstring'''
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c

        a += a
        b >>= 1

    return res
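A few sanity checks for the shift-and-add (Russian peasant) helpers above:

assert multiply(19, 7) == 133
assert multiply(0, 5) == 0
assert multiply_mod(19, 7, 5) == 133 % 5  # == 3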
| 712 |
'''simple docstring'''
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 435 | 0 |
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable) -> Callable:
    """simple docstring"""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."),
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
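A usage sketch for the decorator above; each call emits the warning before delegating to the wrapped function:

@experimental
def new_tokenizer(text):
    return text.split()

new_tokenizer("hello world")  # warns "'new_tokenizer' is experimental ..." and returns ['hello', 'world']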
| 697 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    """simple docstring"""

    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and\n"
            " seventy-five.\n\nSpiritual revelations were conceded to England at that\n"
            " favoured period, as at this."
        )
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
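A minimal sketch of two of the helpers these tests exercise, inferred from the assertions above; the real utils_summarization implementations may differ in detail:

import torch

def truncate_or_pad(sequence, block_size, pad_token_id):
    # clip to block_size, otherwise right-pad with pad_token_id
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))

def build_mask(sequence, pad_token_id):
    # 1 for real tokens, 0 wherever the padding token appears
    mask = torch.ones_like(sequence)
    mask[sequence == pad_token_id] = 0
    return mask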
| 697 | 1 |
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class UpperCamelCase( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(SCREAMING_SNAKE_CASE ):
__snake_case = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__snake_case = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(SCREAMING_SNAKE_CASE ):
__snake_case = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__snake_case = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> int:
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
__snake_case = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
__snake_case = FlaxBertModel.from_pretrained(SCREAMING_SNAKE_CASE )
__snake_case = tokenizer("Do you support jax jitted function?" , return_tensors=TensorType.JAX )
@jax.jit
def eval(**SCREAMING_SNAKE_CASE : Dict ):
return model(**SCREAMING_SNAKE_CASE )
eval(**SCREAMING_SNAKE_CASE ).block_until_ready()
@slow
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
__snake_case = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
__snake_case = FlaxRobertaModel.from_pretrained(SCREAMING_SNAKE_CASE )
__snake_case = tokenizer("Do you support jax jitted function?" , return_tensors=TensorType.JAX )
@jax.jit
def eval(**SCREAMING_SNAKE_CASE : List[Any] ):
return model(**SCREAMING_SNAKE_CASE )
eval(**SCREAMING_SNAKE_CASE ).block_until_ready()
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> str:
'''simple docstring'''
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE , "bert-base is not a local folder and is not a valid model identifier" ):
__snake_case = FlaxAutoModel.from_pretrained("bert-base" )
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Any:
'''simple docstring'''
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
__snake_case = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE , revision="aaaaaa" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE , "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack" , ):
__snake_case = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> str:
'''simple docstring'''
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE , "Use `from_pt=True` to load this model" ):
__snake_case = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
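A compact sketch of the jitted-forward pattern the tests above check, assuming the checkpoint ships flax weights (bert-base-cased does):

import jax
from transformers import AutoTokenizer, FlaxAutoModel

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
model = FlaxAutoModel.from_pretrained("bert-base-cased")
inputs = tokenizer("Do you support jax jitted function?", return_tensors="jax")

@jax.jit
def forward(**kwargs):
    return model(**kwargs)

outputs = forward(**inputs)  # block_until_ready() would force completion, as the tests do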
| 706 |
def sum_of_digits(n: int) -> int:
    '''simple docstring'''
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    '''simple docstring'''
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    '''simple docstring'''
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    '''simple docstring'''
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
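A quick worked check: the digits of 262144 sum to 2+6+2+1+4+4 = 19, and abs() makes the sign irrelevant:

assert sum_of_digits(262_144) == 19
assert sum_of_digits_recursion(-262_144) == 19
assert sum_of_digits_compact(262_144) == 19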
| 473 | 0 |
import requests

_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
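For reference, a trimmed sketch of the payload shape the loop above expects; field names follow News API conventions and real responses carry more keys, so treat this as illustrative:

bbc_news_page = {
    "articles": [
        {"title": "First headline", "url": "https://www.bbc.co.uk/news/first"},
        {"title": "Second headline", "url": "https://www.bbc.co.uk/news/second"},
    ]
}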
| 84 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
UpperCAmelCase = get_logger(__name__)
class A_ :
'''simple docstring'''
_UpperCamelCase : Dict = """dummy_data"""
_UpperCamelCase : Optional[int] = """datasets"""
_UpperCamelCase : Tuple = False
def __init__( self , snake_case , snake_case , snake_case , snake_case = None , snake_case = False , snake_case = True , snake_case = None , ):
lowercase = 0
lowercase = dataset_name
lowercase = cache_dir
lowercase = use_local_dummy_data
lowercase = config
# download_callbacks take a single url as input
lowercase = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
lowercase = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
lowercase = str(snake_case )
# to be downloaded
lowercase = None
lowercase = None
@property
def SCREAMING_SNAKE_CASE__ ( self ):
if self._dummy_file is None:
lowercase = self.download_dummy_data()
return self._dummy_file
@property
def SCREAMING_SNAKE_CASE__ ( self ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('dummy' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('dummy' , self.version_name )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
lowercase = cached_path(
snake_case , cache_dir=self.cache_dir , extract_compressed_file=snake_case , force_extract=snake_case )
return os.path.join(snake_case , self.dummy_file_name )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
if self._bucket_url is None:
lowercase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) )
return self._bucket_url
@property
def SCREAMING_SNAKE_CASE__ ( self ):
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , *snake_case ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
lowercase = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
lowercase = self.dummy_file_name
# special case when data_url is a dict
if isinstance(snake_case , snake_case ):
return self.create_dummy_data_dict(snake_case , snake_case )
elif isinstance(snake_case , (list, tuple) ):
return self.create_dummy_data_list(snake_case , snake_case )
else:
return self.create_dummy_data_single(snake_case , snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , *snake_case ):
return self.download_and_extract(snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
return self.download_and_extract(snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , *snake_case , **snake_case ):
return path
def SCREAMING_SNAKE_CASE__ ( self ):
return {}
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
lowercase = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(snake_case , snake_case ):
for single_url in single_urls:
download_callback(snake_case )
else:
lowercase = single_urls
download_callback(snake_case )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(snake_case , snake_case ):
lowercase = [os.path.join(snake_case , urllib.parse.quote_plus(Path(snake_case ).name ) ) for x in single_urls]
else:
lowercase = single_urls
lowercase = os.path.join(snake_case , urllib.parse.quote_plus(Path(snake_case ).name ) )
lowercase = value
# make sure that values are unique
if all(isinstance(snake_case , snake_case ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
lowercase = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
lowercase = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
lowercase = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , snake_case ) ) for url in data_url )
lowercase = all(
url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
lowercase = [data_url[0]] * len(snake_case )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(snake_case )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase = os.path.join(snake_case , urllib.parse.quote_plus(single_url.split('/' )[-1] ) )
dummy_data_list.append(snake_case )
return dummy_data_list
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
for download_callback in self.download_callbacks:
download_callback(snake_case )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase = os.path.join(snake_case , urllib.parse.quote_plus(data_url.split('/' )[-1] ) )
if os.path.exists(snake_case ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
def _iter_archive_members(snake_case ):
# this preserves the order of the members inside the ZIP archive
lowercase = Path(self.dummy_file ).parent
lowercase = path.relative_to(snake_case )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
lowercase = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(snake_case )
lowercase = Path(snake_case )
lowercase = _iter_archive_members(snake_case ) if self.use_local_dummy_data else path.rglob('*' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('.', '__') ):
yield file_path.relative_to(snake_case ).as_posix(), file_path.open('rb' )
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
if not isinstance(snake_case , snake_case ):
lowercase = [paths]
for path in paths:
if os.path.isfile(snake_case ):
if os.path.basename(snake_case ).startswith(('.', '__') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(snake_case ):
if os.path.basename(snake_case ).startswith(('.', '__') ):
continue
dirnames.sort()
for filename in sorted(snake_case ):
if filename.startswith(('.', '__') ):
continue
yield os.path.join(snake_case , snake_case )
| 84 | 1 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
lowercase = logging.get_logger(__name__)
lowercase = Dict[str, Any]
lowercase = List[Prediction]
@add_end_docstrings(A )
class __lowercase ( A ):
'''simple docstring'''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , '''vision''' )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs
    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
def A_ ( self : Optional[int] , _a : str ):
UpperCamelCase__ = load_image(_a )
UpperCamelCase__ = torch.IntTensor([[image.height, image.width]] )
UpperCamelCase__ = self.image_processor(images=[image] , return_tensors='''pt''' )
if self.tokenizer is not None:
UpperCamelCase__ = self.tokenizer(text=inputs['''words'''] , boxes=inputs['''boxes'''] , return_tensors='''pt''' )
UpperCamelCase__ = target_size
return inputs
def A_ ( self : str , _a : List[str] ):
UpperCamelCase__ = model_inputs.pop('''target_size''' )
UpperCamelCase__ = self.model(**_a )
UpperCamelCase__ = outputs.__class__({'''target_size''': target_size, **outputs} )
if self.tokenizer is not None:
UpperCamelCase__ = model_inputs['''bbox''']
return model_outputs
def A_ ( self : Any , _a : List[str] , _a : Union[str, Any]=0.9 ):
UpperCamelCase__ = model_outputs['''target_size''']
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
UpperCamelCase__ , UpperCamelCase__ = target_size[0].tolist()
def unnormalize(_a : str ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1_000),
(height * bbox[1] / 1_000),
(width * bbox[2] / 1_000),
(height * bbox[3] / 1_000),
] ) )
UpperCamelCase__ , UpperCamelCase__ = model_outputs['''logits'''].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
UpperCamelCase__ = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
UpperCamelCase__ = [unnormalize(_a ) for bbox in model_outputs['''bbox'''].squeeze(0 )]
UpperCamelCase__ = ['''score''', '''label''', '''box''']
UpperCamelCase__ = [dict(zip(_a , _a ) ) for vals in zip(scores.tolist() , _a , _a ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
UpperCamelCase__ = self.image_processor.post_process_object_detection(_a , _a , _a )
UpperCamelCase__ = raw_annotations[0]
UpperCamelCase__ = raw_annotation['''scores''']
UpperCamelCase__ = raw_annotation['''labels''']
UpperCamelCase__ = raw_annotation['''boxes''']
UpperCamelCase__ = scores.tolist()
UpperCamelCase__ = [self.model.config.idalabel[label.item()] for label in labels]
UpperCamelCase__ = [self._get_bounding_box(_a ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
UpperCamelCase__ = ['''score''', '''label''', '''box''']
UpperCamelCase__ = [
dict(zip(_a , _a ) )
for vals in zip(raw_annotation['''scores'''] , raw_annotation['''labels'''] , raw_annotation['''boxes'''] )
]
return annotation
def A_ ( self : Tuple , _a : "torch.Tensor" ):
if self.framework != "pt":
raise ValueError('''The ObjectDetectionPipeline is only available in PyTorch.''' )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = box.int().tolist()
UpperCamelCase__ = {
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox
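A short usage sketch for the pipeline defined above; the checkpoint is illustrative, and any DETR-style detector on the Hub behaves the same way:

from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
predictions = detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
# each entry: {"score": float, "label": str, "box": {"xmin": int, "ymin": int, "xmax": int, "ymax": int}}
print(predictions[0])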
| 591 | import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
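The features/converters paths tested above mirror what `load_dataset` does for CSV; a hedged end-to-end sketch, with a hypothetical local file shaped like the `csv_file_with_label` fixture:

from datasets import ClassLabel, Features, load_dataset

features = Features({"label": ClassLabel(names=["good", "bad"])})
dataset = load_dataset("csv", data_files="csv_with_label.csv", features=features, split="train")
print(dataset[0])  # {"label": 0} since "good" maps to index 0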
| 591 | 1 |
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class snake_case_ :
"""simple docstring"""
def __init__(self: Dict , __UpperCAmelCase: Union[str, Any] , __UpperCAmelCase: Any=13 , __UpperCAmelCase: Optional[int]=30 , __UpperCAmelCase: Union[str, Any]=2 , __UpperCAmelCase: Any=3 , __UpperCAmelCase: Optional[int]=True , __UpperCAmelCase: Dict=True , __UpperCAmelCase: Optional[int]=32 , __UpperCAmelCase: Any=5 , __UpperCAmelCase: int=4 , __UpperCAmelCase: Union[str, Any]=37 , __UpperCAmelCase: int="gelu" , __UpperCAmelCase: int=0.1 , __UpperCAmelCase: Dict=0.1 , __UpperCAmelCase: List[Any]=10 , __UpperCAmelCase: int=0.02 , __UpperCAmelCase: Union[str, Any]=None , __UpperCAmelCase: Tuple=2 , ) -> Optional[int]:
'''simple docstring'''
__a : Any = parent
__a : Optional[int] = batch_size
__a : Optional[Any] = image_size
__a : Tuple = patch_size
__a : int = num_channels
__a : int = is_training
__a : Tuple = use_labels
__a : str = hidden_size
__a : Union[str, Any] = num_hidden_layers
__a : Union[str, Any] = num_attention_heads
__a : List[str] = intermediate_size
__a : Optional[int] = hidden_act
__a : Optional[int] = hidden_dropout_prob
__a : Union[str, Any] = attention_probs_dropout_prob
__a : List[str] = type_sequence_label_size
__a : Tuple = initializer_range
__a : str = scope
__a : Dict = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__a : Optional[Any] = (image_size // patch_size) ** 2
__a : int = num_patches + 1
def UpperCAmelCase__ (self: Optional[Any] ) -> Any:
'''simple docstring'''
__a : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : str = None
if self.use_labels:
__a : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ (self: Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCAmelCase__ (self: Dict , __UpperCAmelCase: Dict , __UpperCAmelCase: Dict , __UpperCAmelCase: Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
__a : List[Any] = ViTModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__a : Dict = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ (self: Dict , __UpperCAmelCase: Optional[Any] , __UpperCAmelCase: str , __UpperCAmelCase: List[str] ) -> int:
'''simple docstring'''
__a : List[Any] = ViTForMaskedImageModeling(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__a : Tuple = model(__UpperCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__a : Any = 1
__a : Optional[Any] = ViTForMaskedImageModeling(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__a : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a : Any = model(__UpperCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase__ (self: Tuple , __UpperCAmelCase: Union[str, Any] , __UpperCAmelCase: int , __UpperCAmelCase: str ) -> List[Any]:
'''simple docstring'''
__a : List[str] = self.type_sequence_label_size
__a : Tuple = ViTForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__a : Optional[int] = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__a : Any = 1
__a : Any = ViTForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__a : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a : str = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase__ (self: Optional[int] ) -> List[Any]:
'''simple docstring'''
__a : int = self.prepare_config_and_inputs()
(
(
__a
) , (
__a
) , (
__a
) ,
) : List[Any] = config_and_inputs
__a : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
snake_case__ = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
snake_case__ = (
{"""feature-extraction""": ViTModel, """image-classification""": ViTForImageClassification}
if is_torch_available()
else {}
)
snake_case__ = True
snake_case__ = False
snake_case__ = False
snake_case__ = False
def UpperCAmelCase__ (self: str ) -> Tuple:
'''simple docstring'''
__a : Optional[int] = ViTModelTester(self )
__a : List[Any] = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 )
def UpperCAmelCase__ (self: Optional[Any] ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def UpperCAmelCase__ (self: int ) -> Optional[int]:
'''simple docstring'''
pass
def UpperCAmelCase__ (self: Any ) -> Optional[Any]:
'''simple docstring'''
__a , __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : List[str] = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )
def UpperCAmelCase__ (self: List[str] ) -> List[Any]:
'''simple docstring'''
__a , __a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Optional[int] = model_class(__UpperCAmelCase )
__a : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : Optional[int] = [*signature.parameters.keys()]
__a : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def UpperCAmelCase__ (self: int ) -> Any:
'''simple docstring'''
__a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def UpperCAmelCase__ (self: List[str] ) -> Any:
'''simple docstring'''
__a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCAmelCase )
def UpperCAmelCase__ (self: int ) -> str:
'''simple docstring'''
__a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
@slow
def UpperCAmelCase__ (self: Tuple ) -> Dict:
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : Union[str, Any] = ViTModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def a_ () -> str:
"""simple docstring"""
__a : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase__ (self: Dict ) -> Any:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def UpperCAmelCase__ (self: str ) -> Dict:
'''simple docstring'''
__a : Any = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(__UpperCAmelCase )
__a : Optional[int] = self.default_image_processor
__a : List[Any] = prepare_img()
__a : Tuple = image_processor(images=__UpperCAmelCase , return_tensors="pt" ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__a : List[str] = model(**__UpperCAmelCase )
# verify the logits
__a : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
__a : List[Any] = torch.tensor([-0.27_44, 0.82_15, -0.08_36] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
def UpperCAmelCase__ (self: Optional[int] ) -> int:
'''simple docstring'''
__a : List[str] = ViTModel.from_pretrained("facebook/dino-vits8" ).to(__UpperCAmelCase )
__a : Optional[int] = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 )
__a : Dict = prepare_img()
__a : Tuple = image_processor(images=__UpperCAmelCase , return_tensors="pt" )
__a : str = inputs.pixel_values.to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__a : List[Any] = model(__UpperCAmelCase , interpolate_pos_encoding=__UpperCAmelCase )
# verify the logits
__a : List[str] = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , __UpperCAmelCase )
__a : List[str] = torch.tensor(
[[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def UpperCAmelCase__ (self: int ) -> int:
'''simple docstring'''
__a : List[Any] = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" )
__a : Tuple = self.default_image_processor
__a : Union[str, Any] = prepare_img()
__a : Optional[int] = image_processor(images=__UpperCAmelCase , return_tensors="pt" )
__a : Any = inputs.pixel_values.to(__UpperCAmelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
__a : List[str] = model(__UpperCAmelCase )
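A standard inference sketch for the checkpoints exercised above (same image URL as the COCO fixture the tests use):

import requests
import torch
from PIL import Image
from transformers import ViTForImageClassification, ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "Egyptian cat"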
| 351 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    """simple docstring"""

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
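A minimal sketch of a concrete subclass (the command name and behavior are illustrative):

class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        # `parser` is the subparsers action the CLI hands to each command
        hello_parser = parser.add_parser("hello")
        hello_parser.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("hello from a custom CLI command")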
| 351 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
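A usage sketch composing two configs via the classmethod above:

from transformers import BertConfig, EncoderDecoderConfig

encoder_config = BertConfig()
decoder_config = BertConfig()
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
print(config.decoder.is_decoder, config.decoder.add_cross_attention)  # True True
print(config.to_dict()["model_type"])  # "encoder-decoder"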
| 721 |
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
SCREAMING_SNAKE_CASE__ = "http://www.mocksite.com/file1.txt"
SCREAMING_SNAKE_CASE__ = "\"text\": [\"foo\", \"foo\"]"
SCREAMING_SNAKE_CASE__ = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class snake_case :
lowerCAmelCase__ :Union[str, Any] = 200
lowerCAmelCase__ :Tuple = {"Content-Length": "100"}
lowerCAmelCase__ :Any = {}
def _a ( self ,**UpperCAmelCase_ ) -> List[Any]:
return [bytes(UpperCAmelCase_ ,"utf-8" )]
def lowerCamelCase ( *_snake_case : int ,**_snake_case : int ):
'''simple docstring'''
return MockResponse()
@pytest.mark.parametrize("urls_type" ,[str, list, dict] )
def lowerCamelCase ( _snake_case : str ,_snake_case : List[Any] ,_snake_case : List[Any] ):
'''simple docstring'''
import requests
monkeypatch.setattr(_snake_case ,"request" ,_snake_case )
lowercase__ = URL
if issubclass(_snake_case ,_snake_case ):
lowercase__ = url
elif issubclass(_snake_case ,_snake_case ):
lowercase__ = [url]
elif issubclass(_snake_case ,_snake_case ):
lowercase__ = {"train": url}
lowercase__ = "dummy"
lowercase__ = "downloads"
lowercase__ = tmp_path
lowercase__ = DownloadConfig(
cache_dir=os.path.join(_snake_case ,_snake_case ) ,use_etag=_snake_case ,)
lowercase__ = DownloadManager(dataset_name=_snake_case ,download_config=_snake_case )
lowercase__ = dl_manager.download(_snake_case )
lowercase__ = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(_snake_case ,_snake_case ):
lowercase__ = [downloaded_paths]
lowercase__ = [urls]
elif isinstance(_snake_case ,_snake_case ):
assert "train" in downloaded_paths.keys()
lowercase__ = downloaded_paths.values()
lowercase__ = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(_snake_case ,_snake_case ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
lowercase__ = Path(_snake_case )
lowercase__ = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
lowercase__ = downloaded_path.read_text()
assert content == CONTENT
lowercase__ = downloaded_path.with_suffix(".json" )
assert metadata_downloaded_path.exists()
lowercase__ = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type" ,[str, list, dict] )
def lowerCamelCase ( _snake_case : Optional[Any] ,_snake_case : List[str] ,_snake_case : List[str] ):
'''simple docstring'''
lowercase__ = str(_snake_case )
if issubclass(_snake_case ,_snake_case ):
lowercase__ = filename
elif issubclass(_snake_case ,_snake_case ):
lowercase__ = [filename]
elif issubclass(_snake_case ,_snake_case ):
lowercase__ = {"train": filename}
lowercase__ = "dummy"
lowercase__ = xz_file.parent
lowercase__ = "extracted"
lowercase__ = DownloadConfig(
cache_dir=_snake_case ,use_etag=_snake_case ,)
lowercase__ = DownloadManager(dataset_name=_snake_case ,download_config=_snake_case )
lowercase__ = dl_manager.extract(_snake_case )
lowercase__ = paths
for extracted_paths in [extracted_paths]:
if isinstance(_snake_case ,_snake_case ):
lowercase__ = [extracted_paths]
lowercase__ = [paths]
elif isinstance(_snake_case ,_snake_case ):
assert "train" in extracted_paths.keys()
lowercase__ = extracted_paths.values()
lowercase__ = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(_snake_case ,_snake_case ):
assert extracted_path == dl_manager.extracted_paths[input_path]
lowercase__ = Path(_snake_case )
lowercase__ = extracted_path.parts
assert parts[-1] == hash_url_to_filename(_snake_case ,etag=_snake_case )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
lowercase__ = extracted_path.read_text()
lowercase__ = text_file.read_text()
assert extracted_file_content == expected_file_content
def lowerCamelCase ( _snake_case : Tuple ,_snake_case : List[str] ):
'''simple docstring'''
assert path.endswith(".jsonl" )
for num_items, line in enumerate(_snake_case ,start=1 ):
lowercase__ = json.loads(line.decode("utf-8" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("archive_jsonl" ,["tar_jsonl_path", "zip_jsonl_path"] )
def lowerCamelCase ( _snake_case : Any ,_snake_case : Union[str, Any] ):
'''simple docstring'''
lowercase__ = request.getfixturevalue(_snake_case )
lowercase__ = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(_snake_case ) ,start=1 ):
_test_jsonl(_snake_case ,_snake_case )
assert num_jsonl == 2
@pytest.mark.parametrize("archive_nested_jsonl" ,["tar_nested_jsonl_path", "zip_nested_jsonl_path"] )
def lowerCamelCase ( _snake_case : Optional[Any] ,_snake_case : Dict ):
'''simple docstring'''
lowercase__ = request.getfixturevalue(_snake_case )
lowercase__ = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(_snake_case ) ,start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(_snake_case ) ,start=1 ):
_test_jsonl(_snake_case ,_snake_case )
assert num_tar == 1
assert num_jsonl == 2
def test_iter_files(data_dir):  # data_dir is a pytest fixture for a directory containing test.txt and train.txt
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 539 | 0 |
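The tests above exercise datasets.DownloadManager, whose download() and extract() mirror the shape of their input (str in, str out; dict in, dict out). A minimal usage sketch; the URL and cache directory are illustrative stand-ins, not fixtures from this suite:

from datasets import DownloadConfig, DownloadManager

download_config = DownloadConfig(cache_dir="./hf_cache/downloads", use_etag=False)
dl_manager = DownloadManager(dataset_name="dummy", download_config=download_config)

# str in -> str out; dict in -> dict out with the same keys
local_file = dl_manager.download("https://example.com/train.jsonl.gz")
local_files = dl_manager.download({"train": "https://example.com/train.jsonl.gz"})
extracted = dl_manager.extract(local_file)  # decompresses under the "extracted" cache subdirectory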
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["ConditionalDetrFeatureExtractor"]
_SCREAMING_SNAKE_CASE = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 369 |
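The module above only materializes its heavy submodules on first attribute access. A stripped-down sketch of that mechanism with made-up names; the real _LazyModule in transformers.utils handles more bookkeeping:

import importlib
from types import ModuleType


class MiniLazyModule(ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {sym: mod for mod, syms in import_structure.items() for sym in syms}

    def __getattr__(self, attr):
        # only hit for symbols not yet set on the module
        module = importlib.import_module(f".{self._symbol_to_module[attr]}", self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not triggered again
        return value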
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # copy tensors by position: both state dicts must enumerate parameters in the same order
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }
    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1,
        ),
    }
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="levit-dump-folder/",
type=Path,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 369 | 1 |
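The conversion above relies on both state dicts listing parameters in the same order and copies weights purely by position. That pattern in isolation, as a generic sketch where the two models are placeholders:

import torch


def port_weights_by_position(src: torch.nn.Module, dst: torch.nn.Module) -> None:
    src_state = src.state_dict()
    src_keys = list(src_state.keys())
    dst_keys = list(dst.state_dict().keys())
    assert len(src_keys) == len(dst_keys), "parameter counts must match for positional copying"
    # rename each source tensor to the destination key at the same position
    dst.load_state_dict({dk: src_state[sk] for sk, dk in zip(src_keys, dst_keys)})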
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(examples: List[str], out_file: str, model_name: str, batch_size: int = 8, device: str = DEFAULT_DEVICE, fp16=False, task="summarization", prefix=None, **generate_kwargs) -> Dict:
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 311 |
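The chunks() helper imported from utils above is not shown in this file; a minimal standalone equivalent, assuming it simply yields fixed-size slices of the examples list:

def chunks(lst, n):
    """Yield successive n-sized chunks from lst."""
    for i in range(0, len(lst), n):
        yield lst[i : i + n]


# e.g. list(chunks(["a", "b", "c"], 2)) -> [["a", "b"], ["c"]]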
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 311 | 1 |
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    # pull an already-ordered "strand" out of the input
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 600 |
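A quick hand-run of the algorithm above on an arbitrary input: the first strand pulled from [3, 1, 4, 2] is [3, 4], which seeds the solution; the recursive pass then merges the remaining [1, 2] into it.

print(strand_sort([3, 1, 4, 2]))  # [1, 2, 3, 4]
print(strand_sort([3, 1, 4, 2], reverse=True))  # [4, 3, 2, 1]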
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")
if __name__ == "__main__":
main()
| 600 | 1 |
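Cross-checking the two implementations above against the standard library (all three should agree):

import math

assert euclidean_gcd(48, 18) == euclidean_gcd_recursive(48, 18) == math.gcd(48, 18) == 6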
import argparse
import os
import re
_lowerCAmelCase = """src/diffusers"""
# Pattern that looks at the indentation in a line.
_lowerCAmelCase = re.compile(R"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
_lowerCAmelCase = re.compile(R"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_lowerCAmelCase = re.compile(R"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
_lowerCAmelCase = re.compile(R"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_lowerCAmelCase = re.compile(R"""\[([^\]]+)\]""")
def get_indent(line: str) -> str:
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def lowercase ( _a ,_a="" ,_a=None ,_a=None ) -> Dict:
UpperCAmelCase_: Any = 0
UpperCAmelCase_: List[str] = code.split("\n" )
if start_prompt is not None:
while not lines[index].startswith(a__ ):
index += 1
UpperCAmelCase_: Union[str, Any] = ["\n".join(lines[:index] )]
else:
UpperCAmelCase_: Any = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
UpperCAmelCase_: List[Any] = [lines[index]]
index += 1
while index < len(a__ ) and (end_prompt is None or not lines[index].startswith(a__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(a__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
current_block.append(lines[index] )
blocks.append("\n".join(a__ ) )
if index < len(a__ ) - 1:
UpperCAmelCase_: Union[str, Any] = [lines[index + 1]]
index += 1
else:
UpperCAmelCase_: str = []
else:
blocks.append("\n".join(a__ ) )
UpperCAmelCase_: List[Any] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(a__ ) > 0:
blocks.append("\n".join(a__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(a__ ):
blocks.append("\n".join(lines[index:] ) )
return blocks
def ignore_underscore(key):
    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement: str) -> str:
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        return _re_bracket_content.sub(_replace, import_statement)
def sort_imports(file: str, check_only: bool = True):
    with open(file, "r") as f:
        code = f.read()
    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(sorted_block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
| 714 |
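A small illustration of the ordering sort_objects() above enforces: constants first, classes second, functions last, with leading underscores ignored for alphabetization (the names are made up):

print(sort_objects(["zeta_fn", "ARCHIVE_MAP", "BetaModel", "_alpha_fn"]))
# -> ['ARCHIVE_MAP', 'BetaModel', '_alpha_fn', 'zeta_fn']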
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
_lowerCAmelCase = """</w>"""
_lowerCAmelCase = """@@ """
def get_pairs(word):
    # return the set of adjacent symbol pairs in a word, where a word is a tuple of symbols
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
_lowerCAmelCase = {"""facebook/s2t-wav2vec2-large-en-de""": 1024}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", pad_token="<pad>", eos_token="</s>", unk_token="<unk>", do_lower_case=False, merges_file=None, **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = " ".join(tokens)
        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))
        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return (vocab_file, merges_file)
| 306 | 0 |
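get_pairs() above produces the candidate set that each BPE merge step chooses from; for example:

print(get_pairs(("l", "o", "w", "</w>")))
# -> {('l', 'o'), ('o', 'w'), ('w', '</w>')}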
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
@slow
def UpperCamelCase ( self )-> int:
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
_A = AutoTokenizer.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(_UpperCamelCase ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
_A = AutoTokenizer.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(_UpperCamelCase ) , 0 )
def UpperCamelCase ( self )-> Optional[Any]:
_A = AutoTokenizer.from_pretrained(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def UpperCamelCase ( self )-> str:
_A = AutoTokenizer.from_pretrained(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def UpperCamelCase ( self )-> List[Any]:
_A = AutoConfig.from_pretrained(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
# Check that tokenizer_type ≠ model_type
_A = AutoTokenizer.from_pretrained(_UpperCamelCase , config=_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def UpperCamelCase ( self )-> List[str]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(_UpperCamelCase , 'vocab.txt' ) )
_A = AutoTokenizer.from_pretrained(_UpperCamelCase , tokenizer_type='bert' , use_fast=_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(_UpperCamelCase , 'vocab.json' ) )
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(_UpperCamelCase , 'merges.txt' ) )
_A = AutoTokenizer.from_pretrained(_UpperCamelCase , tokenizer_type='gpt2' , use_fast=_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
@require_tokenizers
def UpperCamelCase ( self )-> Tuple:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(_UpperCamelCase , 'vocab.txt' ) )
_A = AutoTokenizer.from_pretrained(_UpperCamelCase , tokenizer_type='bert' )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(_UpperCamelCase , 'vocab.json' ) )
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(_UpperCamelCase , 'merges.txt' ) )
_A = AutoTokenizer.from_pretrained(_UpperCamelCase , tokenizer_type='gpt2' )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
def UpperCamelCase ( self )-> Optional[Any]:
with pytest.raises(_UpperCamelCase ):
AutoTokenizer.from_pretrained('./' , tokenizer_type='xxx' )
@require_tokenizers
def UpperCamelCase ( self )-> List[str]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
_A = tokenizer_class.from_pretrained('wietsedv/bert-base-dutch-cased' )
self.assertIsInstance(_UpperCamelCase , (BertTokenizer, BertTokenizerFast) )
if isinstance(_UpperCamelCase , _UpperCamelCase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _UpperCamelCase )
else:
self.assertEqual(tokenizer.do_lower_case , _UpperCamelCase )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def UpperCamelCase ( self )-> Any:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
_UpperCamelCase , 'julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier' , ):
_A = tokenizer_class.from_pretrained('julien-c/herlolip-not-exists' )
def UpperCamelCase ( self )-> Union[str, Any]:
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
_A = TOKENIZER_MAPPING.values()
_A = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(_UpperCamelCase )
@require_tokenizers
def UpperCamelCase ( self )-> Any:
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=_UpperCamelCase ) , _UpperCamelCase )
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' ) , _UpperCamelCase )
@require_tokenizers
def UpperCamelCase ( self )-> Tuple:
_A = AutoTokenizer.from_pretrained('distilbert-base-uncased' , do_lower_case=_UpperCamelCase )
_A = 'Hello, world. How are you?'
_A = tokenizer.tokenize(_UpperCamelCase )
self.assertEqual('[UNK]' , tokens[0] )
_A = AutoTokenizer.from_pretrained('microsoft/mpnet-base' , do_lower_case=_UpperCamelCase )
_A = tokenizer.tokenize(_UpperCamelCase )
self.assertEqual('[UNK]' , tokens[0] )
@require_tokenizers
def UpperCamelCase ( self )-> Optional[Any]:
_A = AutoTokenizer.from_pretrained('robot-test/dummy-tokenizer-fast-with-model-config' )
self.assertEqual(type(_UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 3_0000 )
self.assertEqual(tokenizer.unk_token , '[UNK]' )
self.assertEqual(tokenizer.padding_side , 'right' )
self.assertEqual(tokenizer.truncation_side , 'right' )
def UpperCamelCase ( self )-> List[Any]:
_A = AutoTokenizer.from_pretrained(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCamelCase )
_A = AutoTokenizer.from_pretrained(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def UpperCamelCase ( self )-> Optional[int]:
_A = AutoTokenizer.from_pretrained('ctrl' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
def UpperCamelCase ( self )-> Tuple:
# Check we can load the tokenizer config of an online model.
_A = get_tokenizer_config('bert-base-cased' )
_A = config.pop('_commit_hash' , _UpperCamelCase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(_UpperCamelCase , {'do_lower_case': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
_A = get_tokenizer_config(_UpperCamelCase )
self.assertDictEqual(_UpperCamelCase , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
_A = AutoTokenizer.from_pretrained(_UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCamelCase )
_A = get_tokenizer_config(_UpperCamelCase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['tokenizer_class'] , 'BertTokenizer' )
def UpperCamelCase ( self )-> Dict:
try:
AutoConfig.register('custom' , _UpperCamelCase )
AutoTokenizer.register(_UpperCamelCase , slow_tokenizer_class=_UpperCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_UpperCamelCase ):
AutoTokenizer.register(_UpperCamelCase , slow_tokenizer_class=_UpperCamelCase )
_A = CustomTokenizer.from_pretrained(_UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCamelCase )
_A = AutoTokenizer.from_pretrained(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def UpperCamelCase ( self )-> str:
try:
AutoConfig.register('custom' , _UpperCamelCase )
# Can register in two steps
AutoTokenizer.register(_UpperCamelCase , slow_tokenizer_class=_UpperCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(_UpperCamelCase , fast_tokenizer_class=_UpperCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
_UpperCamelCase , slow_tokenizer_class=_UpperCamelCase , fast_tokenizer_class=_UpperCamelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_UpperCamelCase ):
AutoTokenizer.register(_UpperCamelCase , fast_tokenizer_class=_UpperCamelCase )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
_A = BertTokenizerFast.from_pretrained(_UpperCamelCase )
bert_tokenizer.save_pretrained(_UpperCamelCase )
_A = CustomTokenizerFast.from_pretrained(_UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCamelCase )
_A = AutoTokenizer.from_pretrained(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
_A = AutoTokenizer.from_pretrained(_UpperCamelCase , use_fast=_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def UpperCamelCase ( self )-> List[str]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_UpperCamelCase ):
_A = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_UpperCamelCase ):
_A = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCamelCase )
_A = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCamelCase )
_A = AutoTokenizer.from_pretrained(_UpperCamelCase , trust_remote_code=_UpperCamelCase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizerFast' )
# Test we can also load the slow version
_A = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCamelCase , use_fast=_UpperCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCamelCase )
_A = AutoTokenizer.from_pretrained(_UpperCamelCase , trust_remote_code=_UpperCamelCase , use_fast=_UpperCamelCase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer' )
@require_tokenizers
    def test_from_pretrained_dynamic_tokenizer_conflict(self):
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False
try:
AutoConfig.register('custom' , _UpperCamelCase )
AutoTokenizer.register(_UpperCamelCase , slow_tokenizer_class=_UpperCamelCase )
AutoTokenizer.register(_UpperCamelCase , fast_tokenizer_class=_UpperCamelCase )
# If remote code is not set, the default is to use local
_A = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertFalse(tokenizer.special_attribute_present )
_A = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , use_fast=_UpperCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
_A = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertFalse(tokenizer.special_attribute_present )
_A = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCamelCase , use_fast=_UpperCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
_A = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertTrue(tokenizer.special_attribute_present )
_A = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCamelCase , use_fast=_UpperCamelCase )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def UpperCamelCase ( self )-> List[str]:
_A = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=_UpperCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
# Test we can also load the slow version
_A = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=_UpperCamelCase , use_fast=_UpperCamelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
def UpperCamelCase ( self )-> List[Any]:
with self.assertRaisesRegex(
_UpperCamelCase , 'bert-base is not a local folder and is not a valid model identifier' ):
_A = AutoTokenizer.from_pretrained('bert-base' )
def UpperCamelCase ( self )-> List[Any]:
with self.assertRaisesRegex(
_UpperCamelCase , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
_A = AutoTokenizer.from_pretrained(_UpperCamelCase , revision='aaaaaa' )
def UpperCamelCase ( self )-> Any:
# Make sure we have cached the tokenizer.
_A = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
_A = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 292 |
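The registration flow these tests exercise, reduced to its core; MyConfig and MyTokenizer are placeholder classes you would define yourself:

from transformers import AutoConfig, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer


class MyConfig(PretrainedConfig):
    model_type = "my-model"


class MyTokenizer(PreTrainedTokenizer):
    pass  # a real tokenizer would implement the vocab methods


AutoConfig.register("my-model", MyConfig)
AutoTokenizer.register(MyConfig, slow_tokenizer_class=MyTokenizer)
# AutoTokenizer now resolves checkpoints whose config is a MyConfig to MyTokenizer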
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(self, parent, batch_size=13, patch_size=2, max_length=24, num_mel_bins=16, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, frequency_stride=2, time_stride=2):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels
    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size, max_length=self.max_length, num_mel_bins=self.num_mel_bins, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, frequency_stride=self.frequency_stride, time_stride=self.time_stride,
        )
    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False
    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )
@slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 292 | 1 |
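The sequence-length arithmetic from ASTModelTester.__init__ above, evaluated with its defaults (patch_size=2, strides of 2, num_mel_bins=16, max_length=24):

frequency_out_dimension = (16 - 2) // 2 + 1  # 8
time_out_dimension = (24 - 2) // 2 + 1       # 12
seq_length = frequency_out_dimension * time_out_dimension + 2  # 98, counting the [CLS] and distillation tokens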
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mra'] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
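# How this pattern behaves (sketch): importing the package stays cheap because the
# module object in sys.modules is swapped for a _LazyModule; an attribute access such as
#     from transformers.models.mra import MraModel
# is what finally triggers the real submodule import declared in _import_structure.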
| 719 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40', from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40', from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        EXPECTED_SLICE = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], EXPECTED_SLICE, atol=1e-4))
    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40', from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        EXPECTED_SLICE = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], EXPECTED_SLICE, atol=1e-4))
| 200 | 0 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
a = "facebook/wmt19-en-de"
a = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(F'num of params {tiny_model.num_parameters()}')
# Test
a = tokenizer(["Making tiny model"], return_tensors="pt")
a = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
a = "tiny-wmt19-en-de"
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'Generated {mname_tiny}')
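# Sanity check (a minimal sketch, not part of the original script): reload the tiny
# checkpoint from disk and run a short generation to confirm the files round-trip.
reloaded_tokenizer = FSMTTokenizer.from_pretrained(mname_tiny)
reloaded_model = FSMTForConditionalGeneration.from_pretrained(mname_tiny)
reloaded_batch = reloaded_tokenizer(["Making tiny model"], return_tensors="pt")
generated = reloaded_model.generate(**reloaded_batch, num_beams=1, max_length=8)
print("reloaded generate:", reloaded_tokenizer.batch_decode(generated, skip_special_tokens=True))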
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 518 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_pix2struct": [
"PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Pix2StructConfig",
"Pix2StructTextConfig",
"Pix2StructVisionConfig",
],
"processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
"PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Pix2StructPreTrainedModel",
"Pix2StructForConditionalGeneration",
"Pix2StructVisionModel",
"Pix2StructTextModel",
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 518 | 1 |
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
lowercase_ : Optional[int] = logging.get_logger('''transformers.models.speecht5''')
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f'''upsamples.{i}.1.weight_g''']
        hf_model.upsampler[i].weight_v.data = checkpoint[f'''upsamples.{i}.1.weight_v''']
        hf_model.upsampler[i].bias.data = checkpoint[f'''upsamples.{i}.1.bias''']

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_g''']
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_v''']
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f'''blocks.{i}.convs1.{j}.1.bias''']

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_g''']
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_v''']
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f'''blocks.{i}.convs2.{j}.1.bias''']

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None, ):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
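    # Note on the stats file: SpeechT5HifiGan keeps `mean` and `scale` as registered
    # buffers and uses them to de-normalize incoming log-mel spectrograms, so the
    # converted vocoder only reproduces the original outputs when stats.npy comes from
    # the same training run as the checkpoint.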
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 295 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)
def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result
def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that will remove the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
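# Example wiring (a sketch, not from this module): the converters above are meant to be
# paired with the prompt helpers, e.g.
#     dynamo_backend = _ask_options(
#         "Which dynamo backend would you like to use?",
#         [x.lower() for x in DYNAMO_BACKENDS],
#         _convert_dynamo_backend,
#         default_choice=2,
#     )
# so the selected menu index is converted straight into the enum's value.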
| 295 | 1 |
"""simple docstring"""
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
_a = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' )
parser.add_argument(
'''--dataset_name''', type=__snake_case, default='''wikitext''', help='''Name of the training. Explore datasets at: hf.co/datasets.''', )
parser.add_argument(
'''--dataset_config''', type=__snake_case, default='''wikitext-103-raw-v1''', help='''Configuration name of the dataset.''' )
parser.add_argument(
'''--tokenizer_name_or_path''', type=__snake_case, default='''sayakpaul/unigram-tokenizer-wikitext''', help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''', )
parser.add_argument(
'''--shard_size''', type=__snake_case, default=10_00, help='''Number of entries to go in a single shard.''', )
parser.add_argument('''--split''', type=__snake_case, default='''train''', choices=['''train''', '''test''', '''validation'''] )
parser.add_argument(
'''--limit''', default=__snake_case, type=__snake_case, help='''Limit the number of shards (used for debugging).''', )
parser.add_argument(
'''--max_length''', type=__snake_case, default=5_12, help='''Maximum sequence length. For training on TPUs, it helps to have a maximum'''
''' sequence length that is a multiple of 8.''', )
parser.add_argument(
'''--output_dir''', default='''tf-tpu''', type=__snake_case, help='''Output directory where the TFRecord shards will be saved. If the'''
''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'''
''' shards will be directly saved to a Google Cloud Storage bucket.''', )
    args = parser.parse_args()
return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples['''text'''])

    return fn
def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data['''input_ids'''])):
        features = {
            '''input_ids''': tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data['''input_ids'''][i])),
            '''attention_mask''': tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data['''attention_mask'''][i])),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(F'''Limiting the dataset to {args.limit} entries.''')

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=['''text'''])
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result
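    # For intuition (a toy sketch, not executed here): with max_length=4 the mapping
    #   {"input_ids": [[1, 2, 3], [4, 5, 6, 7, 8]]}
    # concatenates to [1..8] (total_length 8) and regroups into
    #   {"input_ids": [[1, 2, 3, 4], [5, 6, 7, 8]]};
    # a trailing remainder shorter than max_length is dropped.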
    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot['''input_ids'''])
        filename = os.path.join(split_dir, F'''dataset-{shard_count}-{records_containing}.tfrecord''')
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print('''Wrote file {} containing {} records'''.format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(F'''split-{args.split}-records-count.txt''', '''w''') as f:
        print(F'''Total {args.split} records: {total_records}''', file=f)
if __name__ == "__main__":
    args = parse_args()
main(args)
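# Typical invocation (illustrative; the script filename and bucket path are hypothetical,
# the flags are the ones defined in parse_args above):
#   python prepare_tfrecord_shards.py --dataset_name wikitext \
#       --dataset_config wikitext-103-raw-v1 --shard_size 1000 --max_length 512 \
#       --output_dir gs://my-bucket/wikitext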
| 19 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_UpperCamelCase = {"""configuration_vit""": ["""VIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTConfig""", """ViTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ["""ViTFeatureExtractor"""]
_UpperCamelCase = ["""ViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_vit"""] = [
"""VIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTForImageClassification""",
"""ViTForMaskedImageModeling""",
"""ViTModel""",
"""ViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_vit"""] = [
"""TFViTForImageClassification""",
"""TFViTModel""",
"""TFViTPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_vit"""] = [
"""FlaxViTForImageClassification""",
"""FlaxViTModel""",
"""FlaxViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 111 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""gpt2""": 1024,
"""gpt2-medium""": 1024,
"""gpt2-large""": 1024,
"""gpt2-xl""": 1024,
"""distilgpt2""": 1024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = GPT2Tokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs, ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
def A__ ( self : List[Any], __lowercase : "Conversation" ):
lowercase__ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__lowercase, add_special_tokens=__lowercase ) + [self.eos_token_id] )
if len(__lowercase ) > self.model_max_length:
lowercase__ = input_ids[-self.model_max_length :]
return input_ids
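# Note (not part of the original module): `add_prefix_space=True` makes the ByteLevel
# pre-tokenizer treat the first word like a mid-sentence word, which is why the
# assertions in `_batch_encode_plus`/`_encode_plus` above require it whenever
# `is_split_into_words=True` is passed.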
| 705 |
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
"""bart""": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""bert""": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-base-cased-finetuned-mrpc""": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""dpr""": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""gpt2""": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlnet""": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm""": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm-roberta""": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""transfo-xl""": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""openai-gpt""": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""roberta""": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""layoutlm""": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""roberta-large-mnli""": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""camembert""": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""flaubert""": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert""": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert-base-distilled-squad""": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert""": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert-visual-feature-encoder""": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""ctrl""": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""albert""": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""t5""": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""electra""": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""wav2vec2""": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.''')

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f'''Building TensorFlow model from configuration: {config}''')
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models)
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict)

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f'''Max absolute difference between models outputs {diff}''')
        assert diff <= 2e-2, f'''Error, model absolute difference is >2e-2: {diff}'''

    # Save pytorch-model
    print(f'''Save TensorFlow model to {tf_dump_path}''')
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f''' Converting model type {j}/{len(model_types)}: {model_type}''')
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.''')

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f''' Skipping finetuned checkpoint {model_shortcut_name}''')
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f''' Skipping not finetuned checkpoint {model_shortcut_name}''')
                continue
            print(
                f''' Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}''')
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type, pytorch_checkpoint_path=model_file, config_file=config_file, tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"), compare_with_pt_model=compare_with_pt_model, )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
os.remove(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file."""
)
parser.add_argument(
"""--model_type""",
default=None,
type=str,
help=(
F'Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '
"""convert all the models from AWS."""
),
)
parser.add_argument(
"""--pytorch_checkpoint_path""",
default=None,
type=str,
help=(
"""Path to the PyTorch checkpoint path or shortcut name to download from AWS. """
"""If not given, will download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
help=(
"""The config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture. If not given and """
"""--pytorch_checkpoint_path is not given or is a shortcut name """
"""use the configuration associated to the shortcut name on the AWS"""
),
)
parser.add_argument(
"""--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions."""
)
parser.add_argument(
"""--use_cached_models""",
action="""store_true""",
help="""Use cached models if possible instead of updating to latest checkpoint versions.""",
)
parser.add_argument(
"""--remove_cached_files""",
action="""store_true""",
help="""Remove pytorch models after conversion (save memory when converting in batches).""",
)
parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""")
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 37 | 0 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('', '|', '|'),
datarow=DataRow('', '|', '|'),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
payload = [
    {
        'type': 'header',
        'text': {
            'type': 'plain_text',
            'text': F'🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results',
            'emoji': True,
        },
    }
]
total_num_failed = 0
for log in Path().glob('*.log'):
    section_num_failed = 0
    with open(log, 'r') as f:
        for line in f:
            line = json.loads(line)
            if line.get('nodeid', '') != "":
                test = line['nodeid']
                if line.get('duration', None) is not None:
                    duration = F'{line["duration"]:.4f}'
                    if line.get('outcome', '') == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split('_')[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
message = ''
all_filesafailed = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split('::')
                data[0] = data[0].split('/')[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
# Count number of instances in failed_tests
            table = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
table,
headers=['Test Location', 'Num Failed'],
tablefmt=hf_table_format,
stralign='right',
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3_000:
        err = 'Too many failed tests, please see the full report in the Action results.'
        offset = len(err) + 10
        message = message[: 3_000 - offset] + F'\n...\n```\n{err}'
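        # Slack caps a section block's text at 3000 characters, hence the truncation
        # above: the tail is replaced with a pointer to the full Action logs.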
print(F'### {message}')
else:
    message = 'No failed tests! 🤗'
print(F'## {message}')
payload.append(no_error_payload)
if os.environ.get('TEST_TYPE', '') != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ['SLACK_API_TOKEN'])
if message != "No failed tests! 🤗":
        md_report = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': message,
},
}
payload.append(md_report)
        action_button = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': '*For more details:*',
},
'accessory': {
'type': 'button',
'text': {
'type': 'plain_text',
'text': 'Check Action results',
'emoji': True,
},
'url': F'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
payload.append(action_button)
        date_report = {
'type': 'context',
'elements': [
{
'type': 'plain_text',
'text': F'Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}',
}
],
}
payload.append(date_report)
    response = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
    ts = response.data['ts']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
            test_class = ''
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ''
            payload = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': F'Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```',
},
}
client.chat_postMessage(
channel='#accelerate-ci-daily',
thread_ts=ts,
blocks=[payload],
)
| 643 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
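# A quick counter-example (sketch): a triangle contains an odd cycle, so the
# 2-coloring above must fail and the function should return False.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # expected: False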
| 643 | 1 |
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
if __name__ == "__main__":
    def f(x):
        return x**3 + x**2
print('''f(x) = x^3 + x^2''')
print('''The area between the curve, x = -5, x = 5 and the x axis is:''')
    i = 10
    while i <= 100_000:
print(F"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
i *= 10
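    # Sanity check (by hand): the exact value is the integral of x^3 + x^2 over [-5, 5],
    # i.e. 2 * 5**3 / 3 = 250/3 ≈ 83.33 (the odd x^3 term cancels on a symmetric
    # interval), so the printed approximations should converge to ~83.33.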
| 143 |
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes
def solution(ceiling: int = 1_000_000) -> int:
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break

            if sol in primes:
                length = j - i
                largest = sol

    return largest
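# For reference (hedged, not asserted by this file): with the default ceiling of
# 1_000_000 this search is expected to return 997651, the sum of 543 consecutive
# primes, per Project Euler problem 50.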
if __name__ == "__main__":
print(F"""{solution() = }""")
| 143 | 1 |
"""simple docstring"""
def _print_dist(dist, v):
    print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''')
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float('''inf'''):
                print(int(dist[i][j]), end='''\t''')
            else:
                print('''INF''', end='''\t''')
        print()
def floyd_warshall(graph, v):
    dist = [[float('''inf''') for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float('''inf''')
                    and dist[k][j] != float('''inf''')
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
    v = int(input('Enter number of vertices: '))
    e = int(input('Enter number of edges: '))

    graph = [[float('inf') for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print('\nEdge ', i + 1)
        src = int(input('Enter source:'))
        dst = int(input('Enter destination:'))
        weight = float(input('Enter weight:'))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
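# Complexity note: the triple loop over k, i, j makes this O(V^3) time and O(V^2)
# extra space regardless of edge count, which is why Floyd-Warshall suits dense
# all-pairs shortest-path problems.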
| 76 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class LlamaConfig(PretrainedConfig):
    model_type = 'llama'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(self, vocab_size=32_000, hidden_size=4_096, intermediate_size=11_008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act="silu", max_position_embeddings=2_048, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_scaling=None, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                """`rope_scaling` must be a dictionary with two fields, `name` and `factor`, """
                f"""got {self.rope_scaling}""")
        rope_scaling_type = self.rope_scaling.get("""type""", None)
        rope_scaling_factor = self.rope_scaling.get("""factor""", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""")
| 156 | 0 |
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__A = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = """this is a test"""
        output_text = """this is a test"""
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = """<pad>"""
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], """<pad>""")
        self.assertEqual(vocab_keys[1], """<unk>""")
        self.assertEqual(vocab_keys[-1], """▁eloquent""")
        self.assertEqual(len(vocab_keys), 30_000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = """I was born in 92000, and this is falsé."""

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("""This is a test""")
        self.assertListEqual(tokens, ["""▁this""", """▁is""", """▁a""", """▁test"""])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
        self.assertListEqual(
            tokens, ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""], )
def lowerCAmelCase_ ( self : Tuple ) -> int:
tokenizer = AlbertTokenizer(SAMPLE_VOCAB )
text = tokenizer.encode("""sequence builders""" )
text_a = tokenizer.encode("""multi-sequence build""" )
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
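# Illustrative note (not in the original file): for ALBERT the single-sequence layout is
# [CLS] tokens [SEP] and the pair layout is [CLS] tokens_a [SEP] tokens_b [SEP], which is
# exactly the shape the two assertions above verify.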
@slow
def lowerCAmelCase_ ( self : Optional[int] ) -> Optional[int]:
# fmt: off
__a = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 2_19_70, 13, 5, 60_92, 1_67, 28, 71_03, 21_53, 6_73, 8, 70_28, 1_20_51, 18, 17, 71_03, 21_53, 6_73, 8, 35_15, 1_86_84, 8, 44_61, 6, 19_27, 2_97, 8, 1_20_60, 26_07, 18, 13, 5, 44_61, 15, 1_05_38, 38, 8, 1_35, 15, 8_22, 58, 15, 9_93, 1_03_63, 15, 14_60, 80_05, 44_61, 15, 9_93, 2_55, 23_28, 9, 9, 9, 6, 26, 11_12, 8_16, 32_60, 13, 5, 1_03, 23_77, 6, 17, 11_12, 8_16, 27_82, 13, 5, 1_03, 1_06_41, 6, 29, 84, 25_12, 24_30, 7_82, 1_86_84, 27_61, 19, 8_08, 24_30, 25_56, 17, 8_55, 14_80, 94_77, 40_91, 1_28, 1_17_12, 15, 71_03, 21_53, 6_73, 17, 2_48_83, 99_90, 9, 3], [2, 1_15_02, 25, 10_06, 20, 7_82, 8, 1_18_09, 8_55, 17_32, 1_93_93, 1_86_67, 37, 3_67, 2_10_18, 69, 18_54, 34, 1_18_60, 1_91_24, 27, 1_56, 2_25, 17, 1_93, 41_41, 19, 65, 91_24, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 22_31, 8_86, 23_85, 1_76_59, 84, 14, 1_67_92, 19_52, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
| 173 | """simple docstring"""
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
__A = logging.get_logger(__name__)
# General docstring
__A = """RegNetConfig"""
# Base docstring
__A = """facebook/regnet-y-040"""
__A = [1, 10_88, 7, 7]
# Image classification docstring
__A = """facebook/regnet-y-040"""
__A = """tabby, tabby cat"""
__A = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class a ( tf.keras.layers.Layer ):
def __init__( self , out_channels : int , kernel_size : int = 3 , stride : int = 1 , groups : int = 1 , activation : Optional[str] = "relu" , **kwargs , ) -> Tuple:
super().__init__(**kwargs )
# The padding and conv have been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
self.padding = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
self.convolution = tf.keras.layers.ConvaD(
filters=out_channels , kernel_size=kernel_size , strides=stride , padding="""VALID""" , groups=groups , use_bias=False , name="""convolution""" , )
self.normalization = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="""normalization""" )
self.activation = ACTaFN[activation] if activation is not None else tf.identity
def lowerCAmelCase_ ( self , hidden_state ) -> Optional[Any]:
hidden_state = self.convolution(self.padding(hidden_state ) )
hidden_state = self.normalization(hidden_state )
hidden_state = self.activation(hidden_state )
return hidden_state
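# Illustrative note (not in the original file): for stride 1 and an odd kernel size k,
# ZeroPadding2D(k // 2) followed by a VALID convolution reproduces SAME padding; e.g. k=3
# pads one pixel on each side, so a 7x7 feature map stays 7x7 after the convolution.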
class a ( tf.keras.layers.Layer ):
def __init__( self , config : RegNetConfig , **kwargs ) -> List[Any]:
super().__init__(**kwargs )
self.num_channels = config.num_channels
self.embedder = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="""embedder""" , )
def lowerCAmelCase_ ( self , pixel_values ) -> List[str]:
num_channels = shape_list(pixel_values )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
hidden_state = tf.transpose(pixel_values , perm=(0, 2, 3, 1) )
hidden_state = self.embedder(hidden_state )
return hidden_state
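# Illustrative note (not in the original file): perm=(0, 2, 3, 1) maps (batch, channels,
# height, width) to (batch, height, width, channels), so a (2, 3, 224, 224) input becomes
# (2, 224, 224, 3) before it reaches the Conv2D-based embedder.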
class a ( tf.keras.layers.Layer ):
def __init__( self , out_channels : int , stride : int = 2 , **kwargs ) -> Any:
super().__init__(**kwargs )
self.convolution = tf.keras.layers.ConvaD(
filters=out_channels , kernel_size=1 , strides=stride , use_bias=False , name="""convolution""" )
self.normalization = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="""normalization""" )
def lowerCAmelCase_ ( self , inputs : tf.Tensor , training : bool = False ) -> tf.Tensor:
return self.normalization(self.convolution(inputs ) , training=training )
class a ( tf.keras.layers.Layer ):
def __init__( self , in_channels : int , reduced_channels : int , **kwargs ) -> Union[str, Any]:
super().__init__(**kwargs )
self.pooler = tf.keras.layers.GlobalAveragePoolingaD(keepdims=True , name="""pooler""" )
self.attention = [
tf.keras.layers.ConvaD(filters=reduced_channels , kernel_size=1 , activation="""relu""" , name="""attention.0""" ),
tf.keras.layers.ConvaD(filters=in_channels , kernel_size=1 , activation="""sigmoid""" , name="""attention.2""" ),
]
def lowerCAmelCase_ ( self , hidden_state : Dict ) -> int:
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
pooled = self.pooler(hidden_state )
for layer_module in self.attention:
pooled = layer_module(pooled )
hidden_state = hidden_state * pooled
return hidden_state
class a ( tf.keras.layers.Layer ):
def __init__( self , config : RegNetConfig , in_channels : int , out_channels : int , stride : int = 1 , **kwargs ) -> Optional[int]:
super().__init__(**kwargs )
should_apply_shortcut = in_channels != out_channels or stride != 1
groups = max(1 , out_channels // config.groups_width )
self.shortcut = (
TFRegNetShortCut(out_channels , stride=stride , name="""shortcut""" )
if should_apply_shortcut
else tf.keras.layers.Activation("""linear""" , name="""shortcut""" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
self.layers = [
TFRegNetConvLayer(out_channels , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ),
TFRegNetConvLayer(
out_channels , stride=stride , groups=groups , activation=config.hidden_act , name="""layer.1""" ),
TFRegNetConvLayer(out_channels , kernel_size=1 , activation=None , name="""layer.2""" ),
]
self.activation = ACTaFN[config.hidden_act]
def lowerCAmelCase_ ( self , hidden_state : List[str] ) -> Tuple:
residual = hidden_state
for layer_module in self.layers:
hidden_state = layer_module(hidden_state )
residual = self.shortcut(residual )
hidden_state += residual
hidden_state = self.activation(hidden_state )
return hidden_state
class a ( tf.keras.layers.Layer ):
def __init__( self , config : RegNetConfig , in_channels : int , out_channels : int , stride : int = 1 , **kwargs ) -> Dict:
super().__init__(**kwargs )
should_apply_shortcut = in_channels != out_channels or stride != 1
groups = max(1 , out_channels // config.groups_width )
self.shortcut = (
TFRegNetShortCut(out_channels , stride=stride , name="""shortcut""" )
if should_apply_shortcut
else tf.keras.layers.Activation("""linear""" , name="""shortcut""" )
)
self.layers = [
TFRegNetConvLayer(out_channels , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ),
TFRegNetConvLayer(
out_channels , stride=stride , groups=groups , activation=config.hidden_act , name="""layer.1""" ),
TFRegNetSELayer(out_channels , reduced_channels=int(round(in_channels / 4 ) ) , name="""layer.2""" ),
TFRegNetConvLayer(out_channels , kernel_size=1 , activation=None , name="""layer.3""" ),
]
self.activation = ACTaFN[config.hidden_act]
def lowerCAmelCase_ ( self , hidden_state : Optional[Any] ) -> Optional[Any]:
residual = hidden_state
for layer_module in self.layers:
hidden_state = layer_module(hidden_state )
residual = self.shortcut(residual )
hidden_state += residual
hidden_state = self.activation(hidden_state )
return hidden_state
class a ( tf.keras.layers.Layer ):
def __init__( self , config : RegNetConfig , in_channels : int , out_channels : int , stride : int = 2 , depth : int = 2 , **kwargs ) -> Optional[int]:
super().__init__(**kwargs )
layer = TFRegNetXLayer if config.layer_type == """x""" else TFRegNetYLayer
self.layers = [
# downsampling is done in the first layer with stride of 2
layer(config , in_channels , out_channels , stride=stride , name="""layers.0""" ),
*[layer(config , out_channels , out_channels , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def lowerCAmelCase_ ( self , hidden_state : List[str] ) -> int:
for layer_module in self.layers:
hidden_state = layer_module(hidden_state )
return hidden_state
class a ( tf.keras.layers.Layer ):
def __init__( self , config : RegNetConfig , **kwargs ) -> Union[str, Any]:
super().__init__(**kwargs )
self.stages = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
config , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="""stages.0""" , ) )
in_out_channels = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(config , in_channels , out_channels , depth=depth , name=F"""stages.{i+1}""" ) )
def lowerCAmelCase_ ( self , hidden_state : tf.Tensor , output_hidden_states : bool = False , return_dict : bool = True ) -> TFBaseModelOutputWithNoAttention:
hidden_states = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
hidden_states = hidden_states + (hidden_state,)
hidden_state = stage_module(hidden_state )
if output_hidden_states:
hidden_states = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state , hidden_states=hidden_states )
@keras_serializable
class a ( tf.keras.layers.Layer ):
config_class = RegNetConfig
def __init__( self , config : RegNetConfig , **kwargs ) -> Tuple:
super().__init__(**kwargs )
self.config = config
self.embedder = TFRegNetEmbeddings(config , name="""embedder""" )
self.encoder = TFRegNetEncoder(config , name="""encoder""" )
self.pooler = tf.keras.layers.GlobalAveragePoolingaD(keepdims=True , name="""pooler""" )
@unpack_inputs
def lowerCAmelCase_ ( self , pixel_values : tf.Tensor , output_hidden_states : Optional[bool] = None , return_dict : Optional[bool] = None , training : bool = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
embedding_output = self.embedder(pixel_values , training=training )
encoder_outputs = self.encoder(
embedding_output , output_hidden_states=output_hidden_states , return_dict=return_dict , training=training )
last_hidden_state = encoder_outputs[0]
pooled_output = self.pooler(last_hidden_state )
# Change to NCHW output format to have uniformity in the modules
last_hidden_state = tf.transpose(last_hidden_state , perm=(0, 3, 1, 2) )
pooled_output = tf.transpose(pooled_output , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
hidden_states = tuple([tf.transpose(h , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class a ( A_ ):
config_class = RegNetConfig
base_model_prefix = '''regnet'''
main_input_name = '''pixel_values'''
@property
def lowerCAmelCase_ ( self : Union[str, Any] ) -> Dict:
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) , dtype=tf.floataa )}
__A = R"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
__A = R"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''' , A_ , )
class a ( A_ ):
def __init__( self , config : RegNetConfig , *inputs , **kwargs ) -> List[str]:
super().__init__(config , *inputs , **kwargs )
self.regnet = TFRegNetMainLayer(config , name="""regnet""" )
@unpack_inputs
@add_start_docstrings_to_model_forward(lowerCamelCase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCamelCase_ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase_ ( self , pixel_values : tf.Tensor , output_hidden_states : Optional[bool] = None , return_dict : Optional[bool] = None , training : Any=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.regnet(
pixel_values=pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict , training=training , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , A_ , )
class a ( A_ , A_ ):
def __init__( self , config : RegNetConfig , *inputs , **kwargs ) -> Union[str, Any]:
super().__init__(config , *inputs , **kwargs )
self.num_labels = config.num_labels
self.regnet = TFRegNetMainLayer(config , name="""regnet""" )
# classification head
self.classifier = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="""classifier.1""" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(lowerCamelCase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCamelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase_ ( self , pixel_values : tf.Tensor = None , labels : tf.Tensor = None , output_hidden_states : bool = None , return_dict : bool = None , training : Optional[int]=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.regnet(
pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict , training=training )
pooled_output = outputs.pooler_output if return_dict else outputs[1]
flattened_output = self.classifier[0](pooled_output )
logits = self.classifier[1](flattened_output )
loss = None if labels is None else self.hf_compute_loss(labels=labels , logits=logits )
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=loss , logits=logits , hidden_states=outputs.hidden_states )
| 173 | 1 |
'''simple docstring'''
def search( list_data : list , key : int , left : int = 0 , right : int = 0 ) -> int:
'''simple docstring'''
right = right or len(list_data ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(list_data , key , left + 1 , right - 1 )
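# Illustrative usage (not in the original file); the search walks inward from both ends,
# one position per recursion level:
# search([0, 5, 7, 10, 15], 5)  -> 1   (hit at the left pointer on the second call)
# search([0, 5, 7, 10, 15], 6)  -> -1  (the pointers cross without a match)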
if __name__ == "__main__":
import doctest
doctest.testmod()
| 446 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer( model ):
'''simple docstring'''
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self , module : nn.Module , rank : int ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
self.module = module
self.adapter = nn.Sequential(
nn.Linear(module.in_features , rank , bias=False ) , nn.Linear(rank , module.out_features , bias=False ) , )
small_std = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=small_std )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def forward( self , input , *args , **kwargs ) -> Dict:
"""simple docstring"""
return self.module(input , *args , **kwargs ) + self.adapter(input )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class snake_case ( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : int = """bigscience/bloom-1b7"""
# Constant values
UpperCAmelCase : List[Any] = 2.1_0_9_6_5_9_5_5_2_6_9_2_5_7_4
UpperCAmelCase : int = """Hello my name is"""
UpperCAmelCase : List[str] = set()
EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. I""" )
EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" )
EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" )
UpperCAmelCase : Dict = 10
def _lowercase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
self.tokenizer = AutoTokenizer.from_pretrained(self.model_name )
class snake_case ( lowerCAmelCase__ ):
'''simple docstring'''
def _lowercase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
# Models and tokenizer
self.model_fpaa = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='''auto''' )
self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=True , device_map='''auto''' )
def _lowercase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
config = self.model_abit.config
self.assertTrue(hasattr(config , '''quantization_config''' ) )
SCREAMING_SNAKE_CASE_ = config.to_dict()
SCREAMING_SNAKE_CASE_ = config.to_diff_dict()
SCREAMING_SNAKE_CASE_ = config.to_json_string()
def _lowercase ( self : int ) -> List[Any]:
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
mem_fpaa = self.model_fpaa.get_memory_footprint()
mem_abit = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
linear = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def _lowercase ( self : List[Any] ) -> str:
"""simple docstring"""
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(lowerCAmelCase_ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
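# Illustrative note (not in the original file): "packed" here means two 4-bit values per
# uint8, e.g. packed = (a << 4) | b stores a=0b1010 and b=0b0011 as 0b10100011 (163), which
# is why the quantized weights report an unsigned 8-bit dtype above.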
def _lowercase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
encoded_input = self.tokenizer(self.input_text , return_tensors='''pt''' )
output_sequences = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=True ) , self.EXPECTED_OUTPUTS )
def _lowercase ( self : List[str] ) -> Tuple:
"""simple docstring"""
quantization_config = BitsAndBytesConfig()
quantization_config.load_in_abit = True
model_abit_from_config = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=quantization_config , device_map='''auto''' )
encoded_input = self.tokenizer(self.input_text , return_tensors='''pt''' )
output_sequences = model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=True ) , self.EXPECTED_OUTPUTS )
def _lowercase ( self : Optional[int] ) -> str:
"""simple docstring"""
with self.assertRaises(lowerCAmelCase_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(lowerCAmelCase_ )
def _lowercase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = BitsAndBytesConfig()
with self.assertRaises(lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE_ = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase_ , load_in_abit=lowerCAmelCase_ , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , )
def _lowercase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
with self.assertRaises(lowerCAmelCase_ ):
# Tries with `str`
self.model_abit.to('''cpu''' )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0''' ) )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
SCREAMING_SNAKE_CASE_ = self.tokenizer(self.input_text , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE_ = self.model_fpaa.to(torch.floataa )
SCREAMING_SNAKE_CASE_ = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
SCREAMING_SNAKE_CASE_ = self.model_fpaa.to('''cpu''' )
# Check this does not throw an error
SCREAMING_SNAKE_CASE_ = self.model_fpaa.half()
# Check this does not throw an error
SCREAMING_SNAKE_CASE_ = self.model_fpaa.float()
def _lowercase ( self : List[str] ) -> int:
"""simple docstring"""
model = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=True , device_map='''auto''' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class snake_case ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def _lowercase ( cls : Dict ) -> List[str]:
"""simple docstring"""
cls.model_name = '''t5-small'''
cls.dense_act_model_name = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense
cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name )
cls.input_text = '''Translate in German: Hello, my dog is cute'''
def _lowercase ( self : Any ) -> str:
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self : Tuple ) -> List[str]:
"""simple docstring"""
from transformers import TaForConditionalGeneration
SCREAMING_SNAKE_CASE_ = TaForConditionalGeneration._keep_in_fpaa_modules
SCREAMING_SNAKE_CASE_ = None
# test with `t5-small`
SCREAMING_SNAKE_CASE_ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map='''auto''' )
SCREAMING_SNAKE_CASE_ = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
SCREAMING_SNAKE_CASE_ = model.generate(**lowerCAmelCase_ )
# test with `flan-t5-small`
SCREAMING_SNAKE_CASE_ = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map='''auto''' )
SCREAMING_SNAKE_CASE_ = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
SCREAMING_SNAKE_CASE_ = model.generate(**lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = modules
def _lowercase ( self : Optional[Any] ) -> int:
"""simple docstring"""
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
SCREAMING_SNAKE_CASE_ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map='''auto''' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
SCREAMING_SNAKE_CASE_ = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
SCREAMING_SNAKE_CASE_ = model.generate(**lowerCAmelCase_ )
# test with `flan-t5-small`
SCREAMING_SNAKE_CASE_ = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map='''auto''' )
SCREAMING_SNAKE_CASE_ = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
SCREAMING_SNAKE_CASE_ = model.generate(**lowerCAmelCase_ )
class snake_case ( lowerCAmelCase__ ):
'''simple docstring'''
def _lowercase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
super().setUp()
# model_name
self.model_name = '''bigscience/bloom-560m'''
self.seq_to_seq_name = '''t5-small'''
# Different types of model
self.base_model = AutoModel.from_pretrained(self.model_name , load_in_abit=True , device_map='''auto''' )
# Sequence classification model
self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=True , device_map='''auto''' )
# CausalLM model
self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=True , device_map='''auto''' )
# Seq2seq model
self.seq_to_seq_model = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=True , device_map='''auto''' )
def _lowercase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class snake_case ( lowerCAmelCase__ ):
'''simple docstring'''
def _lowercase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
super().setUp()
def _lowercase ( self : List[str] ) -> Dict:
"""simple docstring"""
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
self.pipe = pipeline(
'''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
pipeline_output = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class snake_case ( lowerCAmelCase__ ):
'''simple docstring'''
def _lowercase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
super().setUp()
def _lowercase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
model_parallel = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=True , device_map='''balanced''' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
encoded_input = self.tokenizer(self.input_text , return_tensors='''pt''' )
# Second real batch
output_parallel = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=True ) , self.EXPECTED_OUTPUTS )
class snake_case ( lowerCAmelCase__ ):
'''simple docstring'''
def _lowercase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
self.model_name = '''facebook/opt-350m'''
super().setUp()
def _lowercase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
return
# Step 1: freeze all parameters
model = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=True )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
param.requires_grad = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
param.data = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(lowerCAmelCase_ ) ):
SCREAMING_SNAKE_CASE_ = LoRALayer(module.q_proj , rank=16 )
SCREAMING_SNAKE_CASE_ = LoRALayer(module.k_proj , rank=16 )
SCREAMING_SNAKE_CASE_ = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
batch = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
out = model.forward(**batch )
out.logits.norm().backward()
for module in model.modules():
if isinstance(module , LoRALayer ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(module , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class snake_case ( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase : Tuple = """gpt2-xl"""
UpperCAmelCase : str = 3.3_1_9_1_8_5_4_8_5_4_1_5_2_1_8_7
| 393 | 0 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
__UpperCamelCase : Optional[int] = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
__UpperCamelCase : int = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
__UpperCamelCase : List[Any] = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
__UpperCamelCase : str = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
__UpperCamelCase : Optional[Any] = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def rename_state_dict_key( k , patterns ):
for tf_name, hf_name in patterns:
k = k.replace(tf_name , hf_name )
return k
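# Illustrative usage (not in the original file), with a hypothetical two-entry pattern list:
# rename_state_dict_key("layer_0/kernel", [("kernel", "weight"), ("/", ".")])
# first yields "layer_0/weight" and then "layer_0.weight".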
def convert_bigbird_pegasus( tf_weights , config_update ):
cfg = BigBirdPegasusConfig(**config_update )
torch_model = BigBirdPegasusForConditionalGeneration(cfg )
state_dict = torch_model.state_dict()
mapping = {}
# separating decoder weights
decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )}
remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )}
for k, v in tqdm(decoder_weights.items() , '''tf -> hf conversion''' ):
if any(k.endswith(ending ) for ending in KEYS_TO_IGNORE ):
continue
patterns = DECODER_PATTERNS
new_k = rename_state_dict_key(k , patterns )
if new_k not in state_dict:
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
if any(i in k for i in ['''dense''', '''query''', '''key''', '''value'''] ):
v = v.T
mapping[new_k] = torch.from_numpy(v )
assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
for k, v in tqdm(remaining_weights.items() , '''tf -> hf conversion''' ):
if any(k.endswith(ending ) for ending in KEYS_TO_IGNORE ):
continue
patterns = REMAINING_PATTERNS
new_k = rename_state_dict_key(k , patterns )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
if any(i in k for i in ['''dense''', '''query''', '''key''', '''value'''] ):
v = v.T
mapping[new_k] = torch.from_numpy(v )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
mapping['''model.encoder.embed_positions.weight'''] = mapping['''model.embed_positions.weight''']
mapping['''model.decoder.embed_positions.weight'''] = mapping.pop('''model.embed_positions.weight''' )
missing, extra = torch_model.load_state_dict(mapping , strict=False )
unexpected_missing = [
k
for k in missing
if k
not in [
'''final_logits_bias''',
'''model.encoder.embed_tokens.weight''',
'''model.decoder.embed_tokens.weight''',
'''lm_head.weight''',
]
]
assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], f"""no matches found for the following tf keys {extra}"""
return torch_model
def get_tf_weights_as_numpy( path ):
init_vars = tf.train.list_variables(path )
tf_weights = {}
ignore_name = ['''global_step''']
for name, shape in tqdm(init_vars , desc='''converting tf checkpoint to dict''' ):
skip_key = any(pat in name for pat in ignore_name )
if skip_key:
continue
array = tf.train.load_variable(path , name )
tf_weights[name] = array
return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch( ckpt_path , save_dir , config_update ):
tf_weights = get_tf_weights_as_numpy(ckpt_path )
torch_model = convert_bigbird_pegasus(tf_weights , config_update )
torch_model.save_pretrained(save_dir )
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
__UpperCamelCase : str = parser.parse_args()
__UpperCamelCase : List[Any] = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 34 | import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__UpperCamelCase : str = False
class lowercase__ ( unittest.TestCase):
pass
@nightly
@require_torch_gpu
class lowercase__ ( unittest.TestCase):
def __A ( self : Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Union[str, Any] ):
'''simple docstring'''
pipe = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
init_image = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
generator = torch.manual_seed(0 )
image = pipe.dual_guided(
prompt='''first prompt''' , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(tmpdirname )
pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname , torch_dtype=torch.floataa )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
generator = generator.manual_seed(0 )
new_image = pipe.dual_guided(
prompt='''first prompt''' , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def __A ( self : str ):
'''simple docstring'''
pipe = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
prompt = '''cyberpunk 2077'''
init_image = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
generator = torch.manual_seed(0 )
image = pipe.dual_guided(
prompt=prompt , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
image_slice = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
prompt = '''A painting of a squirrel eating a burger '''
generator = torch.manual_seed(0 )
image = pipe.text_to_image(
prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
image_slice = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
image = pipe.image_variation(init_image , generator=generator , output_type='''numpy''' ).images
image_slice = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 34 | 1 |
'''simple docstring'''
def lowercase__( arr : list[int] , required_sum : int )-> bool:
'''simple docstring'''
arr_len = len(arr )
subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
subset[i][0] = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
subset[0][i] = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
subset[i][j] = subset[i - 1][j]
if arr[i - 1] <= j:
subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
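# Illustrative usage (not in the original file): subset[i][j] answers "can some subset of
# the first i elements sum to j?", so for example
# lowercase__([3, 34, 4, 12, 5, 2], 9)  -> True   (4 + 5 == 9)
# lowercase__([3, 34, 4, 12, 5, 2], 30) -> False  (no subset reaches exactly 30)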
if __name__ == "__main__":
import doctest
doctest.testmod()
| 138 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class lowerCAmelCase_ ( __A ):
'''simple docstring'''
task = field(default='text-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
input_schema = Features({'text': Value('string' )} )
label_schema = Features({'labels': ClassLabel} )
text_column = "text"
label_column = "labels"
def __lowerCamelCase ( self , __UpperCAmelCase ):
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , __UpperCAmelCase ):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
task_template = copy.deepcopy(self )
label_schema = self.label_schema.copy()
label_schema["labels"] = features[self.label_column]
task_template.__dict__["label_schema"] = label_schema
return task_template
@property
def __lowerCamelCase ( self ):
return {
self.text_column: "text",
self.label_column: "labels",
}
| 220 | 0 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def generate_n_pairs(context_len=3_2 , max_steps=1_0 , size_objective_set=1_0_0 , min_len=1_0_2_6 , trim=True , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , ) -> Tuple:
"""simple docstring"""
set_seed(3 )
# generate train_data and objective_set
train_data , objective_set = generate_datasets(
context_len , data_file , number=size_objective_set , min_len=1_0_2_6 , trim=trim )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
device = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# load pretrained model
model = load_gpta("""gpt2""" ).to(device )
print("""computing perplexity on objective set""" )
orig_perp = compute_perplexity(model , objective_set , context_len ).item()
print("""perplexity on objective set:""" , orig_perp )
# collect igf pairs and save to file demo.jbl
collect_objective_set(model , orig_perp , context_len , train_data , objective_set , max_steps , device , igf_data_file )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def training_secondary_learner(secondary_learner_train_data , secondary_learner_max_epochs=1_5 , secondary_learner_batch_size=1_2_8 , eval_freq=1_0_0 , igf_model_path="igf_model.pt" , ) -> Tuple:
"""simple docstring"""
set_seed(4_2 )
# Load pre-trained model
_UpperCamelCase = GPTaLMHeadModel.from_pretrained("""gpt2""" )
# Initialize secondary learner to use embedding weights of model
_UpperCamelCase = SecondaryLearner(lowerCAmelCase )
# Train secondary learner
_UpperCamelCase = train_secondary_learner(
lowerCAmelCase , lowerCAmelCase , max_epochs=lowerCAmelCase , batch_size=lowerCAmelCase , eval_freq=1_0_0 , igf_model_path=lowerCAmelCase , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def finetune(model , train_dataset , test_dataset , context_len=3_2 , max_steps=1_0_0_0 , batch_size=1_6 , threshold=1.0 , recopy_model=recopy_gpta , secondary_learner=None , eval_interval=1_0 , finetuned_model_name="gpt2_finetuned.pt" , ) -> Tuple:
"""simple docstring"""
device = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
train_sampler = RandomSampler(train_dataset )
train_dataloader = DataLoader(train_dataset , sampler=train_sampler )
num_train_epochs = max_steps // (len(train_dataset )) + 1
global_step = 0
context = torch.zeros((1, context_len) , dtype=torch.long , device=device )
model, lm_optimizer, lm_scheduler = recopy_model(model , device , max_steps )
model.train()
if secondary_learner is not None:
secondary_learner.to(lowerCAmelCase )
secondary_learner.eval()
contexts = []
examples = 0
observed_qs = []
test_perps = []
# Compute the performance of the transformer model at the beginning
test_perp = compute_perplexity(model , test_dataset , context_len )
test_perps.append(test_perp )
print("""Test perplexity, step""" , global_step , """:""" , test_perp )
for epoch in range(int(num_train_epochs ) ):
for step, example in enumerate(train_dataloader ):
torch.cuda.empty_cache()
start = random.randint(0 , example.size(2 ) - context_len - 1 )
context[0, :] = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
outputs = model(context , labels=context )
do_backprop = True
if secondary_learner is not None:
predicted_q = secondary_learner.forward(
torch.tensor(context , dtype=torch.long , device=device ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(predicted_q ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 1_0:
threshold = -1
if predicted_q < threshold:
do_backprop = False
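# Illustrative note (not in the original file): the decaying threshold means early batches
# only keep contexts whose predicted information gain is well above average, while from the
# tenth batch on the threshold drops to -1 so effectively every context passes the filter.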
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
lm_loss = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
examples = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
test_perp = compute_perplexity(model , test_dataset , context_len )
test_perps.append(test_perp )
print("""Test perplexity, step""" , global_step , """:""" , test_perp )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 6_0:
break
if max_steps > 0 and global_step > 6_0:
break
# save finetuned transformer model
torch.save(model.state_dict() , finetuned_model_name )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def main() -> Any:
"""simple docstring"""
parser = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" )
# Required parameters
parser.add_argument(
"""--data_dir""" , default=lowerCAmelCase , type=lowerCAmelCase , required=lowerCAmelCase , help="""The input data dir. Should contain data files for WikiText.""" , )
parser.add_argument(
"""--model_name_or_path""" , default=lowerCAmelCase , type=lowerCAmelCase , required=lowerCAmelCase , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--data_file""" , type=lowerCAmelCase , default=lowerCAmelCase , help=(
"""A jbl file containing tokenized data which can be split as objective dataset, """
"""train_dataset and test_dataset."""
) , )
parser.add_argument(
"""--igf_data_file""" , type=lowerCAmelCase , default=lowerCAmelCase , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , )
parser.add_argument(
"""--output_dir""" , default=lowerCAmelCase , type=lowerCAmelCase , required=lowerCAmelCase , help="""The output directory where the final fine-tuned model is stored.""" , )
parser.add_argument(
"""--tokenizer_name""" , default=lowerCAmelCase , type=lowerCAmelCase , help="""Pretrained tokenizer name or path if not the same as model_name""" , )
parser.add_argument("""--seed""" , type=lowerCAmelCase , default=lowerCAmelCase , help="""A seed for reproducible training.""" )
parser.add_argument(
"""--context_len""" , default=3_2 , type=lowerCAmelCase , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--size_objective_set""" , default=1_0_0 , type=lowerCAmelCase , help="""number of articles that are long enough to be used as our objective set""" , )
parser.add_argument(
"""--eval_freq""" , default=1_0_0 , type=lowerCAmelCase , help="""secondary model evaluation is triggered at eval_freq""" )
parser.add_argument("""--max_steps""" , default=1_0_0_0 , type=lowerCAmelCase , help="""To calculate training epochs""" )
parser.add_argument(
"""--secondary_learner_batch_size""" , default=1_2_8 , type=lowerCAmelCase , help="""batch size of training data for secondary learner""" , )
parser.add_argument(
"""--batch_size""" , default=1_6 , type=lowerCAmelCase , help="""batch size of training data of language model(gpt2) """ )
parser.add_argument(
"""--eval_interval""" , default=1_0 , type=lowerCAmelCase , help=(
"""decay the selectivity of our secondary learner filter from"""
"""1 standard deviation above average to 1 below average after 10 batches"""
) , )
parser.add_argument(
"""--number""" , default=1_0_0 , type=lowerCAmelCase , help="""The number of examples split to be used as objective_set/test_data""" )
parser.add_argument(
"""--min_len""" , default=1_0_2_6 , type=lowerCAmelCase , help="""The minimum length of the article to be used as objective set""" )
parser.add_argument(
"""--secondary_learner_max_epochs""" , default=1_5 , type=lowerCAmelCase , help="""number of epochs to train secondary learner""" )
parser.add_argument("""--trim""" , default=lowerCAmelCase , type=lowerCAmelCase , help="""truncate the example if it exceeds context length""" )
parser.add_argument(
"""--threshold""" , default=1.0 , type=lowerCAmelCase , help=(
"""The threshold value used by secondary learner to filter the train_data and allow only"""
""" informative data as input to the model"""
) , )
parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=lowerCAmelCase , help="""finetuned_model_name""" )
parser.add_argument(
"""--recopy_model""" , default=lowerCAmelCase , type=lowerCAmelCase , help="""Reset the model to the original pretrained GPT-2 weights after each iteration""" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=3_2 , max_steps=1_0 , size_objective_set=1_0_0 , min_len=1_0_2_6 , trim=lowerCAmelCase , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , )
# Load train data for secondary learner
_UpperCamelCase = joblib.load("""data/IGF_values.jbl""" )
# Train secondary learner
_UpperCamelCase = training_secondary_learner(
lowerCAmelCase , secondary_learner_max_epochs=1_5 , secondary_learner_batch_size=1_2_8 , eval_freq=1_0_0 , igf_model_path="""igf_model.pt""" , )
# load pretrained gpt2 model
_UpperCamelCase = GPTaLMHeadModel.from_pretrained("""gpt2""" )
set_seed(4_2 )
# Generate train and test data to train and evaluate gpt2 model
_UpperCamelCase , _UpperCamelCase = generate_datasets(
context_len=3_2 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=1_0_0 , min_len=1_0_2_6 , trim=lowerCAmelCase )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , context_len=3_2 , max_steps=1_0_0_0 , batch_size=1_6 , threshold=1.0 , recopy_model=lowerCAmelCase , secondary_learner=lowerCAmelCase , eval_interval=1_0 , finetuned_model_name="""gpt2_finetuned.pt""" , )
if __name__ == "__main__":
main()
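# Example invocation (script name and paths illustrative; the flags are defined above):
#
#   python run_clm_igf.py --data_dir ./data --model_name_or_path gpt2 \
#       --output_dir ./igf_output --data_file data/tokenized_stories_train_wikitext103.jbl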
| 202 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    """Unconditional audio generation pipeline.

    The class name was lost to obfuscation; it is restored here on the basis that the
    body matches diffusers' DanceDiffusion pipeline (UNet + scheduler over raw audio).
    """

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_{t-1}
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
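# Minimal usage sketch for the pipeline above (checkpoint name is an assumption; any
# repo exposing a compatible unet + scheduler pair should work):
#
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")  # assumed checkpoint
#   output = pipe(batch_size=1, num_inference_steps=100)
#   waveform = output.audios[0]  # numpy array of shape (channels, sample_size)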
| 202 | 1 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
SAMPLE_PROCESSOR_CONFIG = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
SAMPLE_VOCAB = get_tests_dir('fixtures/vocab.json')
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir('fixtures')
class AutoProcessorTest(unittest.TestCase):
    vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']

    def setUp(self):
        # avoid blocking on the trust_remote_code confirmation prompt in the tests below
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_processor_from_model_shortcut(self):
        processor = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h')
        self.assertIsInstance(processor, WavaVecaProcessor)
def lowerCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase : int =WavaVecaConfig()
_lowerCamelCase : Dict =AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' )
# save in new folder
model_config.save_pretrained(lowercase_ )
processor.save_pretrained(lowercase_ )
_lowerCamelCase : Any =AutoProcessor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def lowerCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(lowercase_ , os.path.join(lowercase_ , lowercase_ ) )
copyfile(lowercase_ , os.path.join(lowercase_ , 'vocab.json' ) )
_lowerCamelCase : Union[str, Any] =AutoProcessor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def lowerCamelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase : List[Any] =WavaVecaFeatureExtractor()
_lowerCamelCase : List[str] =AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' )
_lowerCamelCase : str =WavaVecaProcessor(lowercase_ , lowercase_ )
# save in new folder
processor.save_pretrained(lowercase_ )
# drop `processor_class` in tokenizer
with open(os.path.join(lowercase_ , lowercase_ ) , 'r' ) as f:
_lowerCamelCase : Optional[int] =json.load(lowercase_ )
config_dict.pop('processor_class' )
with open(os.path.join(lowercase_ , lowercase_ ) , 'w' ) as f:
f.write(json.dumps(lowercase_ ) )
_lowerCamelCase : Optional[int] =AutoProcessor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def lowerCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase : Optional[Any] =WavaVecaFeatureExtractor()
_lowerCamelCase : Tuple =AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' )
_lowerCamelCase : Dict =WavaVecaProcessor(lowercase_ , lowercase_ )
# save in new folder
processor.save_pretrained(lowercase_ )
# drop `processor_class` in feature extractor
with open(os.path.join(lowercase_ , lowercase_ ) , 'r' ) as f:
_lowerCamelCase : Union[str, Any] =json.load(lowercase_ )
config_dict.pop('processor_class' )
with open(os.path.join(lowercase_ , lowercase_ ) , 'w' ) as f:
f.write(json.dumps(lowercase_ ) )
_lowerCamelCase : Optional[int] =AutoProcessor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def lowerCamelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase : Optional[Any] =WavaVecaConfig(processor_class='Wav2Vec2Processor' )
model_config.save_pretrained(lowercase_ )
# copy relevant files
copyfile(lowercase_ , os.path.join(lowercase_ , 'vocab.json' ) )
# create emtpy sample processor
with open(os.path.join(lowercase_ , lowercase_ ) , 'w' ) as f:
f.write('{}' )
_lowerCamelCase : int =AutoProcessor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def lowerCamelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
with self.assertRaises(lowercase_ ):
_lowerCamelCase : int =AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase_ ):
_lowerCamelCase : Union[str, Any] =AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=lowercase_ )
_lowerCamelCase : List[str] =AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' , trust_remote_code=lowercase_ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
_lowerCamelCase : int =processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
_lowerCamelCase : Optional[int] =processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
# Test we can also load the slow version
_lowerCamelCase : int =AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=lowercase_ , use_fast=lowercase_ )
_lowerCamelCase : Optional[int] =new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , 'NewTokenizer' )
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
def lowerCamelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ )
AutoProcessor.register(lowercase_ , lowercase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoProcessor.register(lowercase_ , lowercase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
_lowerCamelCase : str =CustomFeatureExtractor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase : str =os.path.join(lowercase_ , 'vocab.txt' )
with open(lowercase_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
_lowerCamelCase : List[Any] =CustomTokenizer(lowercase_ )
_lowerCamelCase : Optional[int] =CustomProcessor(lowercase_ , lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(lowercase_ )
_lowerCamelCase : List[Any] =AutoProcessor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCamelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
class A ( UpperCamelCase_ ):
UpperCamelCase__ : Optional[Any] =False
class A ( UpperCamelCase_ ):
UpperCamelCase__ : int =False
class A ( UpperCamelCase_ ):
UpperCamelCase__ : Union[str, Any] ='AutoFeatureExtractor'
UpperCamelCase__ : str ='AutoTokenizer'
UpperCamelCase__ : List[Any] =False
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ )
AutoProcessor.register(lowercase_ , lowercase_ )
# If remote code is not set, the default is to use local classes.
_lowerCamelCase : int =AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
_lowerCamelCase : int =AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=lowercase_ )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
_lowerCamelCase : str =AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=lowercase_ )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCamelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
_lowerCamelCase : List[Any] =AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(processor.__class__.__name__ , 'BertTokenizerFast' )
def lowerCamelCase ( self : Any ) -> Dict:
"""simple docstring"""
_lowerCamelCase : Any =AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-convnext' )
self.assertEqual(processor.__class__.__name__ , 'ConvNextImageProcessor' )
@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase):
    vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id='test-processor')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='valid_org/test-processor-org')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='test-dynamic-processor')
        except HTTPError:
            pass
def lowerCamelCase ( self : str ) -> int:
"""simple docstring"""
_lowerCamelCase : Tuple =WavaVecaProcessor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(lowercase_ , 'test-processor' ) , push_to_hub=lowercase_ , use_auth_token=self._token )
_lowerCamelCase : Union[str, Any] =WavaVecaProcessor.from_pretrained(F'''{USER}/test-processor''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(lowercase_ , getattr(new_processor.feature_extractor , lowercase_ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def lowerCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
_lowerCamelCase : int =WavaVecaProcessor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(lowercase_ , 'test-processor-org' ) , push_to_hub=lowercase_ , use_auth_token=self._token , organization='valid_org' , )
_lowerCamelCase : str =WavaVecaProcessor.from_pretrained('valid_org/test-processor-org' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(lowercase_ , getattr(new_processor.feature_extractor , lowercase_ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def lowerCamelCase ( self : List[Any] ) -> str:
"""simple docstring"""
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
_lowerCamelCase : Optional[Any] =CustomFeatureExtractor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase : Dict =os.path.join(lowercase_ , 'vocab.txt' )
with open(lowercase_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
_lowerCamelCase : Any =CustomTokenizer(lowercase_ )
_lowerCamelCase : List[Any] =CustomProcessor(lowercase_ , lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F'''{USER}/test-dynamic-processor''' , token=self._token )
_lowerCamelCase : List[str] =Repository(lowercase_ , clone_from=F'''{USER}/test-dynamic-processor''' , token=self._token )
processor.save_pretrained(lowercase_ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor',
'AutoProcessor': 'custom_processing.CustomProcessor',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(lowercase_ , 'tokenizer_config.json' ) ) as f:
_lowerCamelCase : Union[str, Any] =json.load(lowercase_ )
self.assertDictEqual(
tokenizer_config['auto_map'] , {
'AutoTokenizer': ['custom_tokenization.CustomTokenizer', None],
'AutoProcessor': 'custom_processing.CustomProcessor',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(lowercase_ , 'custom_feature_extraction.py' ) ) )
self.assertTrue(os.path.isfile(os.path.join(lowercase_ , 'custom_tokenization.py' ) ) )
self.assertTrue(os.path.isfile(os.path.join(lowercase_ , 'custom_processing.py' ) ) )
repo.push_to_hub()
_lowerCamelCase : Tuple =AutoProcessor.from_pretrained(F'''{USER}/test-dynamic-processor''' , trust_remote_code=lowercase_ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , 'CustomProcessor' )
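# The save/load round trip these tests exercise, in miniature (requires Hub access):
#
#   from transformers import AutoProcessor
#   processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
#   processor.save_pretrained("./local_processor")
#   reloaded = AutoProcessor.from_pretrained("./local_processor")  # same class is resolved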
| 464 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # we specify a different beta so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # we specify a different beta so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
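# The two-phase PNDM sampling pattern exercised above, as a standalone sketch
# (the lambda is a toy stand-in for a real denoising network):
#
#   import torch
#   from diffusers import PNDMScheduler
#
#   scheduler = PNDMScheduler(num_train_timesteps=1000, beta_schedule="linear")
#   scheduler.set_timesteps(10)
#   sample = torch.randn(1, 3, 8, 8)
#   model = lambda s, t: s * t / (t + 1)
#   for t in scheduler.prk_timesteps:   # Runge-Kutta warm-up steps
#       sample = scheduler.step_prk(model(sample, t), t, sample).prev_sample
#   for t in scheduler.plms_timesteps:  # linear multistep steps
#       sample = scheduler.step_plms(model(sample, t), t, sample).prev_sample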
| 464 | 1 |
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Read a binary file and return its contents as a string of bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Decompress an LZW-encoded bit string back into the original bit string."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        if math.log2(index).is_integer():
            # widen every key by one bit once the dictionary size crosses a power of two
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """Pack a bit string into bytes (with a stop-bit pad) and write it to disk."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            # write all but the final (padding) chunk
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Strip the unary size prefix (a run of zeros terminated by a one)."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def decompress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    decompress(sys.argv[1], sys.argv[2])
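# Example usage (script and file names illustrative; the input must come from the
# matching LZW compressor so that the size prefix and padding line up):
#
#   python lempel_ziv_decompress.py compressed.lz restored.bin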
| 186 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            steps_offset=1,
            beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=False)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=False)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs

    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass

    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)

        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
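# The img2img call pattern these tests cover, in miniature (checkpoint name is an
# assumption; init_image stands for any PIL image or tensor at the model's resolution):
#
#   from diffusers import StableDiffusionXLImg2ImgPipeline
#   pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
#       "stabilityai/stable-diffusion-xl-base-1.0"  # assumed checkpoint
#   )
#   result = pipe(prompt="a photo of a castle", image=init_image, strength=0.75)
#   result.images[0].save("out.png")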
| 186 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
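# Quick forward-pass sketch for the Flax BERT classes tested above (requires Hub access):
#
#   import numpy as np
#   from transformers import AutoTokenizer, FlaxBertModel
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#   model = FlaxBertModel.from_pretrained("bert-base-cased")
#   inputs = tokenizer("Hello world", return_tensors="np")
#   last_hidden_state = model(**inputs).last_hidden_state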
| 594 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """True if iterating the list revisits a node, i.e. the list contains a cycle."""
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
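# The visited-list check above is O(n^2) time / O(n) extra space. A common alternative
# is Floyd's tortoise-and-hare cycle detection, O(n) time / O(1) space (sketch):
#
#   def has_loop_floyd(head: Node) -> bool:
#       slow = fast = head
#       while fast and fast.next_node:
#           slow = slow.next_node
#           fast = fast.next_node.next_node
#           if slow is fast:
#               return True
#       return False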
| 674 | 0 |
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF""",
base=1_6,
),
"""generator""": 2,
},
# 2048-bit
1_4: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AACAA68FFFFFFFFFFFFFFFF""",
base=1_6,
),
"""generator""": 2,
},
# 3072-bit
1_5: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF""",
base=1_6,
),
"""generator""": 2,
},
# 4096-bit
1_6: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"""
+ """FFFFFFFFFFFFFFFF""",
base=1_6,
),
"""generator""": 2,
},
# 6144-bit
1_7: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"""
+ """8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"""
+ """302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"""
+ """A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"""
+ """49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"""
+ """FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"""
+ """180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"""
+ """3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"""
+ """04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"""
+ """B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"""
+ """1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"""
+ """E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"""
+ """99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"""
+ """04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"""
+ """233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"""
+ """D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"""
+ """AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"""
+ """DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"""
+ """2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"""
+ """F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"""
+ """BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"""
+ """B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"""
+ """387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"""
+ """6DCC4024FFFFFFFFFFFFFFFF""",
base=1_6,
),
"""generator""": 2,
},
# 8192-bit
1_8: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"""
+ """F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"""
+ """179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"""
+ """DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"""
+ """5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"""
+ """D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"""
+ """23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"""
+ """06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"""
+ """DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"""
+ """12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"""
+ """38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"""
+ """741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"""
+ """3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"""
+ """22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"""
+ """4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"""
+ """062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"""
+ """4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"""
+ """B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"""
+ """4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"""
+ """9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"""
+ """60C980DD98EDD3DFFFFFFFFFFFFFFFFF""",
base=1_6,
),
"""generator""": 2,
},
}
class DiffieHellman:
    """Diffie-Hellman key exchange over the RFC 3526 MODP groups defined above."""

    # current minimum recommendation is a 2048-bit prime (group 14)
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check that the remote public key lies in the valid subgroup
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key: int, prime: int) -> bool:
        # check that the remote public key lies in the valid subgroup
        return (
            2 <= remote_public_key <= prime - 2
            and pow(remote_public_key, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
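# Example exchange using the class above (both parties must agree on the group):
#
#   alice, bob = DiffieHellman(group=14), DiffieHellman(group=14)
#   alice_shared = alice.generate_shared_key(bob.generate_public_key())
#   bob_shared = bob.generate_shared_key(alice.generate_public_key())
#   assert alice_shared == bob_shared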
| 689 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 689 | 1 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
 | 483 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)

        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()

        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    # test one model to quickly showcase that the script works
    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    # testing with 2 models to validate: 1. translation (t5) 2. summarization (mbart)
    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])

        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path: Path) -> bool:
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
| 711 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
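

# For context, a minimal sketch of the idea the module under test builds on:
# near-duplicate detection via Jaccard similarity of token sets (this is an
# illustration, not the actual minhash_deduplication implementation).
def _jaccard_similarity(a: str, b: str) -> float:
    set_a, set_b = set(a.split()), set(b.split())
    return len(set_a & set_b) / len(set_a | set_b)


# "a " * 20 and "a " * 30 share the same token set, hence similarity 1.0,
# which is why they form one duplicate cluster at the 0.85 threshold above.
assert _jaccard_similarity("a " * 20, "a " * 30) == 1.0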
| 153 | 0 |
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
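# Illustrative only (toy module, made-up names): set_recursively walks the
# dotted `key` into the module tree and copies the tensor into the matching
# parameter, e.g.
#
#   toy = torch.nn.Sequential(torch.nn.Linear(2, 2))
#   set_recursively(toy, "0", torch.zeros(2, 2), "toy.0.weight", "weight")
#   assert torch.equal(toy[0].weight.data, torch.zeros(2, 2))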
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
__a = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 494 | '''simple docstring'''
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    ) | 494 | 1 |
'''simple docstring'''
def catalan_numbers(upper_limit: int) -> list:
    """
    Return the Catalan number sequence from 0 through `upper_limit`.
    """
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
            N = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(F"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod()
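
    # Worked check of the recurrence above:
    # C(4) = C0*C3 + C1*C2 + C2*C1 + C3*C0 = 5 + 2 + 2 + 5 = 14.
    assert catalan_numbers(5) == [1, 1, 2, 5, 14, 42]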
| 718 |
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('''pretraining''', '''MODEL_FOR_PRETRAINING_MAPPING_NAMES''', '''AutoModelForPreTraining'''),
('''feature-extraction''', '''MODEL_MAPPING_NAMES''', '''AutoModel'''),
('''audio-classification''', '''MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForAudioClassification'''),
('''text-generation''', '''MODEL_FOR_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForCausalLM'''),
('''automatic-speech-recognition''', '''MODEL_FOR_CTC_MAPPING_NAMES''', '''AutoModelForCTC'''),
('''image-classification''', '''MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForImageClassification'''),
('''image-segmentation''', '''MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES''', '''AutoModelForImageSegmentation'''),
('''fill-mask''', '''MODEL_FOR_MASKED_LM_MAPPING_NAMES''', '''AutoModelForMaskedLM'''),
('''object-detection''', '''MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES''', '''AutoModelForObjectDetection'''),
(
'''zero-shot-object-detection''',
'''MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES''',
'''AutoModelForZeroShotObjectDetection''',
),
('''question-answering''', '''MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForQuestionAnswering'''),
('''text2text-generation''', '''MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForSeq2SeqLM'''),
('''text-classification''', '''MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForSequenceClassification'''),
('''automatic-speech-recognition''', '''MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES''', '''AutoModelForSpeechSeq2Seq'''),
(
'''table-question-answering''',
'''MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForTableQuestionAnswering''',
),
('''token-classification''', '''MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForTokenClassification'''),
('''multiple-choice''', '''MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES''', '''AutoModelForMultipleChoice'''),
(
'''next-sentence-prediction''',
'''MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES''',
'''AutoModelForNextSentencePrediction''',
),
(
'''audio-frame-classification''',
'''MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForAudioFrameClassification''',
),
('''audio-xvector''', '''MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES''', '''AutoModelForAudioXVector'''),
(
'''document-question-answering''',
'''MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForDocumentQuestionAnswering''',
),
(
'''visual-question-answering''',
'''MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForVisualQuestionAnswering''',
),
    ('''image-to-text''', '''MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES''', '''AutoModelForVision2Seq'''),
(
'''zero-shot-image-classification''',
'''MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForZeroShotImageClassification''',
),
('''depth-estimation''', '''MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES''', '''AutoModelForDepthEstimation'''),
('''video-classification''', '''MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForVideoClassification'''),
('''mask-generation''', '''MODEL_FOR_MASK_GENERATION_MAPPING_NAMES''', '''AutoModelForMaskGeneration'''),
]
def camel_case_split(identifier):
    """Split a camel-cased `identifier` into words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
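# Example (illustrative): camel_case_split("TFBertModel") -> ["TF", "Bert", "Model"].
# get_frameworks_table below relies on this to peel the trailing word off a class
# name until the remaining prefix matches a known model type.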
def get_frameworks_table():
    """
    Generate a dataframe flagging, for each model type, whether it has a PT/TF/Flax backend.
    """
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})

    return table
def update_metadata(token, commit_sha):
    """
    Update the metadata for the Transformers repo.
    """
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--token''', type=str, help='''The token to use to push to the transformers-metadata dataset.''')
parser.add_argument('''--commit_sha''', type=str, help='''The sha of the commit going with this update.''')
parser.add_argument('''--check-only''', action='''store_true''', help='''Activate to just check all pipelines are present.''')
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 564 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clap''': [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapAudioConfig''',
'''ClapConfig''',
'''ClapTextConfig''',
],
'''processing_clap''': ['''ClapProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapModel''',
'''ClapPreTrainedModel''',
'''ClapTextModel''',
'''ClapTextModelWithProjection''',
'''ClapAudioModel''',
'''ClapAudioModelWithProjection''',
]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
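# `_import_structure` maps each submodule to the public names it exports; the
# `_LazyModule` assignment at the bottom of this file replaces the module in
# `sys.modules` so the heavy torch-dependent imports above only happen when an
# attribute is first accessed.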
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 39 |
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs) | 39 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '\\n\n'
_DESCRIPTION = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    model_id (str): model used for calculating Perplexity\n        NOTE: Perplexity can only be calculated for causal language models.\n        This includes models such as gpt2, causal variations of bert,\n        causal versions of t5, and more (the full list can be found\n        in the AutoModelForCausalLM documentation here:\n        https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n    input_texts (list of str): input text, each separate text snippet\n        is one list entry.\n    batch_size (int): the batch size to run texts through the model. Defaults to 16.\n    add_start_token (bool): whether to add the start token to the texts,\n        so the perplexity can include the probability of the first word. Defaults to True.\n    device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n    perplexity: dictionary containing the perplexity scores for the texts\n        in the input list, as well as the mean perplexity. If one of the input texts is\n        longer than the max input length of the model, then it is truncated to the\n        max length for the perplexity computation.\nExamples:\n    Example 1:\n        >>> perplexity = datasets.load_metric("perplexity")\n        >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              add_start_token=False,\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results["mean_perplexity"], 2))\n        78.22\n        >>> print(round(results["perplexities"][0], 2))\n        11.11\n\n    Example 2:\n        >>> perplexity = datasets.load_metric("perplexity")\n        >>> input_texts = datasets.load_dataset("wikitext",\n        ...                                     "wikitext-2-raw-v1",\n        ...                                     split="test")["text"][:50] # doctest:+ELLIPSIS\n        [...]\n        >>> input_texts = [s for s in input_texts if s!=\'\']\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results["mean_perplexity"], 2))\n        60.35\n        >>> print(round(results["perplexities"][0], 2))\n        81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'input_texts': datasets.Value('string' ),
} ) , reference_urls=['https://huggingface.co/docs/transformers/perplexity'] , )
    def _compute(self, model_id, input_texts, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            # CrossEntropyLoss returns natural-log losses, so perplexity is the
            # natural exp of the masked mean per-token loss.
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
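# The core quantity computed above, in isolation (a minimal sketch with toy
# numbers): per-sequence perplexity is exp of the mean per-token NLL.
#
#   nll = torch.tensor([[2.0, 2.0, 2.0]])               # per-token losses
#   mask = torch.ones_like(nll)
#   ppl = torch.exp((nll * mask).sum(1) / mask.sum(1))  # exp(2.0) ≈ 7.39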
| 710 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2

# Input/output locations are intentionally left blank; fill these in before running.
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index+1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple:
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:  # horizontal flip
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:  # vertical flip
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print('DONE ✅')
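
    # Worked example of the YOLO-format bbox update performed above
    # (illustrative numbers): a horizontal flip keeps class id, y_center,
    # width and height, and mirrors x_center around the image midline.
    #
    #   bbox = [0, 0.25, 0.40, 0.10, 0.20]   # [class, x_c, y_c, w, h]
    #   flipped = [bbox[0], 1 - bbox[1], bbox[2], bbox[3], bbox[4]]
    #   assert flipped == [0, 0.75, 0.40, 0.10, 0.20]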
| 540 | 0 |
"""simple docstring"""
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=384,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=128,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    # TensorRT BERT bindings expect int32 host buffers
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
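# Note on the flow above: the host output arrays are allocated page-locked
# (pinned) further below, which is what lets memcpy_dtoh_async overlap with
# compute on the stream; stream.synchronize() is the single blocking point,
# so infer_time covers the H2D copies, the kernel, and the D2H copies.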
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
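    # e.g. a float32 output binding of shape (8, 384) needs
    # 8 * 384 * 4 = 12288 bytes.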
# Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(f" Num examples = {len(eval_dataset)}")
logger.info(f" Batch size = {args.per_device_eval_batch_size}")
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 1000 / niter))
logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 1000))
logger.info('''Total Number of Inference = %d''', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"Evaluation metrics: {eval_metric}") | 642 | '''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings. We specifically avoid
    mapping to whitespace/control characters the bpe code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
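# Illustrative behaviour of the mapping above: printable bytes map to
# themselves (ord("A") -> "A"), while bytes the BPE would choke on are
# shifted into unused code points, e.g. byte 0 -> chr(256). The result is a
# bijection over all 256 byte values.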
def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in a word, where the word is a tuple
    of (variable-length string) symbols.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class _lowercase( _lowerCamelCase ):
"""simple docstring"""
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self: Union[str, Any] ,a: Tuple ,a: Dict ,a: Dict="replace" ,a: int="<s>" ,a: List[str]="</s>" ,a: Any="</s>" ,a: str="<s>" ,a: Dict="<unk>" ,a: Union[str, Any]="<pad>" ,a: Optional[int]="<mask>" ,a: int=False ,**a: int ,):
__UpperCAmelCase = AddedToken(a ,lstrip=a ,rstrip=a ) if isinstance(a ,a ) else bos_token
__UpperCAmelCase = AddedToken(a ,lstrip=a ,rstrip=a ) if isinstance(a ,a ) else eos_token
__UpperCAmelCase = AddedToken(a ,lstrip=a ,rstrip=a ) if isinstance(a ,a ) else sep_token
__UpperCAmelCase = AddedToken(a ,lstrip=a ,rstrip=a ) if isinstance(a ,a ) else cls_token
__UpperCAmelCase = AddedToken(a ,lstrip=a ,rstrip=a ) if isinstance(a ,a ) else unk_token
__UpperCAmelCase = AddedToken(a ,lstrip=a ,rstrip=a ) if isinstance(a ,a ) else pad_token
# Mask token behaves like a normal word, i.e. it includes the space before it
__UpperCAmelCase = AddedToken(a ,lstrip=a ,rstrip=a ) if isinstance(a ,a ) else mask_token
super().__init__(
errors=a ,bos_token=a ,eos_token=a ,unk_token=a ,sep_token=a ,cls_token=a ,pad_token=a ,mask_token=a ,add_prefix_space=a ,**a ,)
with open(a ,encoding='utf-8' ) as vocab_handle:
__UpperCAmelCase = json.load(a )
__UpperCAmelCase = {v: k for k, v in self.encoder.items()}
__UpperCAmelCase = errors # how to handle errors in decoding
__UpperCAmelCase = bytes_to_unicode()
__UpperCAmelCase = {v: k for k, v in self.byte_encoder.items()}
with open(a ,encoding='utf-8' ) as merges_handle:
__UpperCAmelCase = merges_handle.read().split('\n' )[1:-1]
__UpperCAmelCase = [tuple(merge.split() ) for merge in bpe_merges]
__UpperCAmelCase = dict(zip(a ,range(len(a ) ) ) )
__UpperCAmelCase = {}
__UpperCAmelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__UpperCAmelCase = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def snake_case ( self: Optional[Any] ):
return len(self.encoder )
def snake_case ( self: Optional[Any] ):
return dict(self.encoder ,**self.added_tokens_encoder )
def snake_case ( self: Optional[int] ,a: Optional[int] ):
if token in self.cache:
return self.cache[token]
__UpperCAmelCase = tuple(a )
__UpperCAmelCase = get_pairs(a )
if not pairs:
return token
while True:
__UpperCAmelCase = min(a ,key=lambda a : self.bpe_ranks.get(a ,float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
__UpperCAmelCase , __UpperCAmelCase = bigram
__UpperCAmelCase = []
__UpperCAmelCase = 0
while i < len(a ):
try:
__UpperCAmelCase = word.index(a ,a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__UpperCAmelCase = j
if word[i] == first and i < len(a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__UpperCAmelCase = tuple(a )
__UpperCAmelCase = new_word
if len(a ) == 1:
break
else:
__UpperCAmelCase = get_pairs(a )
__UpperCAmelCase = ' '.join(a )
__UpperCAmelCase = word
return word
def snake_case ( self: int ,a: str ):
__UpperCAmelCase = []
for token in re.findall(self.pat ,a ):
__UpperCAmelCase = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(a ).split(' ' ) )
return bpe_tokens
def snake_case ( self: Optional[Any] ,a: Union[str, Any] ):
return self.encoder.get(a ,self.encoder.get(self.unk_token ) )
def snake_case ( self: Any ,a: Union[str, Any] ):
return self.decoder.get(a )
def snake_case ( self: Dict ,a: Union[str, Any] ):
__UpperCAmelCase = ''.join(a )
__UpperCAmelCase = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' ,errors=self.errors )
return text
def snake_case ( self: Optional[Any] ,a: str ,a: Optional[str] = None ):
if not os.path.isdir(a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__UpperCAmelCase = os.path.join(
a ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
__UpperCAmelCase = os.path.join(
a ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(a ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=a ,ensure_ascii=a ) + '\n' )
__UpperCAmelCase = 0
with open(a ,'w' ,encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
__UpperCAmelCase = token_index
writer.write(' '.join(a ) + '\n' )
index += 1
return vocab_file, merge_file
def snake_case ( self: List[str] ,a: List[int] ,a: Optional[List[int]] = None ,a: bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a ,token_ids_a=a ,already_has_special_tokens=a )
if token_ids_a is None:
return [1] + ([0] * len(a )) + [1]
return [1] + ([0] * len(a )) + [1, 1] + ([0] * len(a )) + [1]
def snake_case ( self: Optional[int] ,a: List[int] ,a: Optional[List[int]] = None ):
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case ( self: Dict ,a: List[Any] ,a: Optional[int]=False ,**a: Optional[Any] ):
__UpperCAmelCase = kwargs.pop('add_prefix_space' ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(a ) > 0 and not text[0].isspace()):
__UpperCAmelCase = ' ' + text
return (text, kwargs)
def snake_case ( self: Tuple ,a: List[int] ,a: Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def snake_case ( self: Any ,a: "Conversation" ):
__UpperCAmelCase = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to add a space prefix, as is done within Blenderbot
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(a )
__UpperCAmelCase = ' '.join(a )
__UpperCAmelCase = self.encode(a )
if len(a ) > self.model_max_length:
__UpperCAmelCase = input_ids[-self.model_max_length :]
logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
| 396 | 0 |
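For orientation, a minimal self-contained sketch of the byte-level BPE merge loop that the tokenizer above implements; the toy merge ranks here are made up for illustration and are not Blenderbot's real merge table.

# Toy byte-level BPE merge loop (made-up merge ranks, for illustration only).
def toy_bpe(word: str, ranks: dict) -> str:
    symbols = tuple(word)
    while len(symbols) > 1:
        pairs = {(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)}
        best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:
            break  # no applicable merge left
        first, second = best
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == (first, second):
                merged.append(first + second)
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = tuple(merged)
    return " ".join(symbols)

print(toy_bpe("hello", {("l", "l"): 0, ("h", "e"): 1}))  # -> 'he ll o'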
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 713 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 137 | 0 |
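Both __init__ modules above use the same optional-dependency guard; a generic sketch of the idea, with a hypothetical factory function standing in for the real pipeline classes.

# Hypothetical sketch of the optional-dependency pattern used above.
try:
    import torch  # optional heavy dependency
    _torch_available = True
except ImportError:
    _torch_available = False

if _torch_available:
    def make_pipeline():
        return "real pipeline backed by torch"
else:
    def make_pipeline():
        # Dummy stand-in that fails loudly only when actually used.
        raise ImportError("This pipeline requires the `torch` package.")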
import torch
from diffusers import StableDiffusionPipeline
lowerCamelCase__ : Any = """path-to-your-trained-model"""
lowerCamelCase__ : List[str] = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("""cuda""")
lowerCamelCase__ : int = """A photo of sks dog in a bucket"""
lowerCamelCase__ : Any = pipe(prompt, num_inference_steps=5_0, guidance_scale=7.5).images[0]
image.save("""dog-bucket.png""")
| 33 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_lowerCAmelCase = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class __A ( unittest.TestCase ):
"""simple docstring"""
A_ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
A_ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
A_ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
A_ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def snake_case_( self )-> Any:
lowercase__ = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' )
lowercase__ = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(_lowerCamelCase ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}] )
lowercase__ = text_classifier('''This is great !''' , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_1''', '''score''': 0.4_9_6}] )
lowercase__ = text_classifier(['''This is great !''', '''This is bad'''] , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , [
[{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_1''', '''score''': 0.4_9_6}],
[{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_1''', '''score''': 0.4_9_6}],
] , )
lowercase__ = text_classifier('''This is great !''' , top_k=1 )
self.assertEqual(nested_simplify(_lowerCamelCase ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}] )
# Legacy behavior
lowercase__ = text_classifier('''This is great !''' , return_all_scores=_lowerCamelCase )
self.assertEqual(nested_simplify(_lowerCamelCase ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}] )
lowercase__ = text_classifier('''This is great !''' , return_all_scores=_lowerCamelCase )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , [[{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_1''', '''score''': 0.4_9_6}]] )
lowercase__ = text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=_lowerCamelCase )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , [
[{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_1''', '''score''': 0.4_9_6}],
[{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_1''', '''score''': 0.4_9_6}],
] , )
lowercase__ = text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=_lowerCamelCase )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , [
{'''label''': '''LABEL_0''', '''score''': 0.5_0_4},
{'''label''': '''LABEL_0''', '''score''': 0.5_0_4},
] , )
@require_torch
def snake_case_( self )-> Union[str, Any]:
import torch
lowercase__ = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' , device=torch.device('''cpu''' ) , )
lowercase__ = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(_lowerCamelCase ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}] )
@require_tf
def snake_case_( self )-> Optional[Any]:
lowercase__ = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''tf''' )
lowercase__ = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(_lowerCamelCase ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}] )
@slow
@require_torch
def snake_case_( self )-> Optional[Any]:
lowercase__ = pipeline('''text-classification''' )
lowercase__ = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(_lowerCamelCase ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] )
lowercase__ = text_classifier('''This is bad !''' )
self.assertEqual(nested_simplify(_lowerCamelCase ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] )
lowercase__ = text_classifier('''Birds are a type of animal''' )
self.assertEqual(nested_simplify(_lowerCamelCase ) , [{'''label''': '''POSITIVE''', '''score''': 0.9_8_8}] )
@slow
@require_tf
def snake_case_( self )-> Optional[Any]:
lowercase__ = pipeline('''text-classification''' , framework='''tf''' )
lowercase__ = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(_lowerCamelCase ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] )
lowercase__ = text_classifier('''This is bad !''' )
self.assertEqual(nested_simplify(_lowerCamelCase ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] )
lowercase__ = text_classifier('''Birds are a type of animal''' )
self.assertEqual(nested_simplify(_lowerCamelCase ) , [{'''label''': '''POSITIVE''', '''score''': 0.9_8_8}] )
def snake_case_( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )-> str:
lowercase__ = TextClassificationPipeline(model=_lowerCamelCase , tokenizer=_lowerCamelCase )
return text_classifier, ["HuggingFace is in", "This is another test"]
def snake_case_( self , _lowerCamelCase , _lowerCamelCase )-> Dict:
lowercase__ = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
lowercase__ = '''HuggingFace is in'''
lowercase__ = text_classifier(_lowerCamelCase )
self.assertEqual(nested_simplify(_lowerCamelCase ) , [{'''label''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase )}] )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
lowercase__ = ['''HuggingFace is in ''', '''Paris is in France''']
lowercase__ = text_classifier(_lowerCamelCase )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , [{'''label''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase )}, {'''label''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase )}] , )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
self.assertTrue(outputs[1]['''label'''] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
lowercase__ = text_classifier(_lowerCamelCase , top_k=_lowerCamelCase )
lowercase__ = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , [[{'''label''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase )}] * N, [{'''label''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase )}] * N] , )
lowercase__ = {'''text''': '''HuggingFace is in ''', '''text_pair''': '''Paris is in France'''}
lowercase__ = text_classifier(_lowerCamelCase )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , {'''label''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase )} , )
self.assertTrue(outputs['''label'''] in model.config.idalabel.values() )
# This might be used as a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead, as it was producing wrong outputs.
lowercase__ = [['''HuggingFace is in ''', '''Paris is in France''']]
with self.assertRaises(_lowerCamelCase ):
text_classifier(_lowerCamelCase )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
lowercase__ = text_classifier([[['''HuggingFace is in ''', '''Paris is in France''']]] )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , [{'''label''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase )}] , )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
| 161 | 0 |
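A minimal usage sketch matching what the tests above exercise, assuming the transformers package is installed; top_k=None is the non-legacy way to get every label's score.

from transformers import pipeline

classifier = pipeline("text-classification", model="hf-internal-testing/tiny-random-distilbert")
print(classifier("This is great !"))              # best label only
print(classifier("This is great !", top_k=None))  # all labels with scores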
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
UpperCamelCase = [
"""small""",
"""small-base""",
"""medium""",
"""medium-base""",
"""intermediate""",
"""intermediate-base""",
"""large""",
"""large-base""",
"""xlarge""",
"""xlarge-base""",
]
UpperCamelCase = {
"""vocab_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json""",
"""funnel-transformer/small-base""": (
"""https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json""",
"""funnel-transformer/large-base""": (
"""https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"""
),
},
}
UpperCamelCase = {F'''funnel-transformer/{name}''': 512 for name in _model_names}
UpperCamelCase = {F'''funnel-transformer/{name}''': {"""do_lower_case""": True} for name in _model_names}
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
snake_case = VOCAB_FILES_NAMES
snake_case = PRETRAINED_VOCAB_FILES_MAP
snake_case = PRETRAINED_INIT_CONFIGURATION
snake_case = FunnelTokenizer
snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case = 2
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<sep>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<cls>" , _SCREAMING_SNAKE_CASE="<mask>" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="##" , **_SCREAMING_SNAKE_CASE , )->List[Any]:
'''simple docstring'''
super().__init__(
_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , clean_text=_SCREAMING_SNAKE_CASE , tokenize_chinese_chars=_SCREAMING_SNAKE_CASE , strip_accents=_SCREAMING_SNAKE_CASE , wordpieces_prefix=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
A_ : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _SCREAMING_SNAKE_CASE ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _SCREAMING_SNAKE_CASE ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _SCREAMING_SNAKE_CASE ) != tokenize_chinese_chars
):
A_ : str = getattr(_SCREAMING_SNAKE_CASE , normalizer_state.pop('''type''' ) )
A_ : Any = do_lower_case
A_ : Union[str, Any] = strip_accents
A_ : str = tokenize_chinese_chars
A_ : str = normalizer_class(**_SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = do_lower_case
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None )->int:
'''simple docstring'''
A_ : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None )->List[int]:
'''simple docstring'''
A_ : Optional[int] = [self.sep_token_id]
A_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None )->Tuple[str]:
'''simple docstring'''
A_ : Union[str, Any] = self._tokenizer.model.save(_SCREAMING_SNAKE_CASE , name=_SCREAMING_SNAKE_CASE )
return tuple(_SCREAMING_SNAKE_CASE )
| 152 |
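The token-type method above gives the [CLS] position its own segment id (the class attribute set to 2); a standalone worked sketch of the same layout, independent of the class.

from typing import Optional

CLS_TOKEN_TYPE_ID = 2  # Funnel-specific: [CLS] gets its own segment id

def token_type_ids(len_a: int, len_b: Optional[int] = None) -> list:
    ids = [CLS_TOKEN_TYPE_ID] + [0] * (len_a + 1)  # [CLS] + sequence A + [SEP]
    if len_b is not None:
        ids += [1] * (len_b + 1)                   # sequence B + [SEP]
    return ids

print(token_type_ids(3))     # [2, 0, 0, 0, 0]
print(token_type_ids(2, 2))  # [2, 0, 0, 0, 1, 1, 1]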
from typing import Dict
from .base import GenericTensor, Pipeline
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
def _snake_case ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE )->Optional[int]:
'''simple docstring'''
if tokenize_kwargs is None:
A_ : Optional[int] = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
'''truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)''' )
A_ : Optional[Any] = truncation
A_ : Dict = tokenize_kwargs
A_ : Union[str, Any] = {}
if return_tensors is not None:
A_ : Union[str, Any] = return_tensors
return preprocess_params, {}, postprocess_params
def _snake_case ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )->Dict[str, GenericTensor]:
'''simple docstring'''
A_ : Optional[Any] = self.framework
A_ : Tuple = self.tokenizer(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
return model_inputs
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->Optional[int]:
'''simple docstring'''
A_ : str = self.model(**_SCREAMING_SNAKE_CASE )
return model_outputs
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False )->Any:
'''simple docstring'''
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )->List[str]:
'''simple docstring'''
return super().__call__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
| 152 | 1 |
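A short usage sketch of the feature-extraction pipeline defined above, assuming a small public checkpoint; the result is one hidden-state vector per token.

from transformers import pipeline

extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
features = extractor("Hello world")
# features[0] holds one vector per token, special tokens included
print(len(features[0]), len(features[0][0]))  # e.g. 4 tokens x 768 dims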
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {'''vocab_file''': '''sentencepiece.model'''}
a__ = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
}
a__ = {
'''google/rembert''': 256,
}
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = VOCAB_FILES_NAMES
UpperCAmelCase__ : int = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , _a , _a=False , _a=True , _a=True , _a="[CLS]" , _a="[SEP]" , _a="[UNK]" , _a="[SEP]" , _a="[PAD]" , _a="[CLS]" , _a="[MASK]" , **_a , ) -> Dict:
super().__init__(
do_lower_case=snake_case_ , remove_space=snake_case_ , keep_accents=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , **snake_case_ , )
_a : List[Any] = do_lower_case
_a : Optional[int] = remove_space
_a : List[Any] = keep_accents
_a : Tuple = vocab_file
_a : Union[str, Any] = spm.SentencePieceProcessor()
self.sp_model.Load(snake_case_ )
@property
def __lowercase ( self ) -> Dict:
return len(self.sp_model )
def __lowercase ( self ) -> Any:
_a : Optional[int] = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> List[str]:
_a : str = self.__dict__.copy()
_a : int = None
return state
def __setstate__( self , _a ) -> str:
_a : Optional[int] = d
_a : Optional[int] = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def __lowercase ( self , _a , _a=False ) -> int:
_a : Any = self.sp_model.EncodeAsPieces(snake_case_ )
return pieces
def __lowercase ( self , _a ) -> List[Any]:
return self.sp_model.PieceToId(snake_case_ )
def __lowercase ( self , _a ) -> Any:
return self.sp_model.IdToPiece(snake_case_ )
def __lowercase ( self , _a ) -> List[str]:
_a : Optional[int] = self.sp_model.decode_pieces(snake_case_ )
return out_string
def __lowercase ( self , _a , _a = None ) -> List[Any]:
_a : Union[str, Any] = [self.sep_token_id]
_a : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __lowercase ( self , _a , _a = None , _a = False ) -> Union[str, Any]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(snake_case_ )) + [1] + ([0] * len(snake_case_ )) + [1]
return [1] + ([0] * len(snake_case_ )) + [1]
def __lowercase ( self , _a , _a = None ) -> Tuple:
_a : Optional[int] = [self.sep_token_id]
_a : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowercase ( self , _a , _a = None ) -> List[Any]:
if not os.path.isdir(snake_case_ ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(snake_case_ ) )
return
_a : Optional[Any] = os.path.join(
snake_case_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ):
copyfile(self.vocab_file , snake_case_ )
return (out_vocab_file,)
| 14 |
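The special-tokens mask above marks added [CLS]/[SEP] positions with 1 and real sequence tokens with 0; a standalone worked sketch of the same rule.

def special_tokens_mask(seq_a, seq_b=None):
    # 1 marks an added special token ([CLS]/[SEP]), 0 marks sequence tokens.
    if seq_b is None:
        return [1] + [0] * len(seq_a) + [1]
    return [1] + [0] * len(seq_a) + [1] + [0] * len(seq_b) + [1]

print(special_tokens_mask([7, 8, 9]))    # [1, 0, 0, 0, 1]
print(special_tokens_mask([7, 8], [9]))  # [1, 0, 0, 1, 0, 1]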
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
UpperCamelCase_ = logging.get_logger(__name__)
class a_ (_a ):
__lowerCAmelCase : List[str] = ["""audio_values""", """audio_mask"""]
def __init__( self , snake_case_=2_0_4_8 , snake_case_=1 , snake_case_=[1_6, 1_6] , snake_case_=1_2_8 , snake_case_=4_4_1_0_0 , snake_case_=8_6 , snake_case_=2_0_4_8 , snake_case_=0.0 , **snake_case_ , ):
super().__init__(
feature_size=snake_case_ , sampling_rate=snake_case_ , padding_value=snake_case_ , **snake_case_ , )
_lowerCAmelCase : Optional[int] = spectrogram_length
_lowerCAmelCase : str = num_channels
_lowerCAmelCase : Union[str, Any] = patch_size
_lowerCAmelCase : Tuple = feature_size // self.patch_size[1]
_lowerCAmelCase : Optional[int] = n_fft
_lowerCAmelCase : Union[str, Any] = sampling_rate // hop_length_to_sampling_rate
_lowerCAmelCase : Optional[Any] = sampling_rate
_lowerCAmelCase : Any = padding_value
_lowerCAmelCase : Optional[int] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=snake_case_ , min_frequency=0.0 , max_frequency=2_2050.0 , sampling_rate=snake_case_ , norm="""slaney""" , mel_scale="""slaney""" , ).T
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : int = spectrogram(
snake_case_ , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , )
_lowerCAmelCase : int = log_spec[:, :-1]
_lowerCAmelCase : List[Any] = log_spec - 20.0
_lowerCAmelCase : Any = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self , snake_case_ , snake_case_ = None , snake_case_ = True , snake_case_ = None , snake_case_ = False , snake_case_ = False , **snake_case_ , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"""This feature extractor is set to support sampling rate"""
f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
f' with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
_lowerCAmelCase : List[Any] = isinstance(snake_case_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
_lowerCAmelCase : List[str] = is_batched_numpy or (
isinstance(snake_case_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_lowerCAmelCase : Dict = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(snake_case_ , np.ndarray ):
_lowerCAmelCase : Optional[Any] = np.asarray(snake_case_ , dtype=np.floataa )
elif isinstance(snake_case_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_lowerCAmelCase : Optional[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_lowerCAmelCase : Tuple = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
_lowerCAmelCase : Optional[Any] = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , snake_case_ ):
_lowerCAmelCase : Optional[Any] = [np.asarray(snake_case_ , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
_lowerCAmelCase : Any = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
_lowerCAmelCase : Union[str, Any] = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
_lowerCAmelCase : Optional[int] = np.array(snake_case_ ).astype(np.floataa )
# convert into correct format for padding
_lowerCAmelCase : Union[str, Any] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
_lowerCAmelCase : Union[str, Any] = np.ones([len(snake_case_ ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
_lowerCAmelCase : int = padded_audio_features * self.padding_value
for i in range(len(snake_case_ ) ):
_lowerCAmelCase : Union[str, Any] = audio_features[i]
_lowerCAmelCase : List[str] = feature
# return as BatchFeature
if return_attention_mask:
_lowerCAmelCase : str = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
else:
_lowerCAmelCase : List[Any] = {"""audio_values""": padded_audio_features}
_lowerCAmelCase : Dict = BatchFeature(data=snake_case_ , tensor_type=snake_case_ )
return encoded_inputs
| 384 | 0 |
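A quick worked check of the patch arithmetic behind the audio mask above, using the extractor's defaults (patch_size [16, 16] and feature_size 128, so freq_len = 128 // 16 = 8): a spectrogram of T frames contributes ceil(T / 16) * 8 patches.

from math import ceil

PATCH_SIZE = [16, 16]
FEATURE_SIZE = 128
FREQ_LEN = FEATURE_SIZE // PATCH_SIZE[1]  # 8 frequency patches per time step

def num_audio_patches(num_frames: int) -> int:
    # Mirrors the max_patch_len computation in the extractor above.
    return ceil(num_frames / PATCH_SIZE[0]) * FREQ_LEN

print(num_audio_patches(100))  # ceil(100 / 16) = 7 time patches -> 7 * 8 = 56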
from collections import deque
from math import floor
from random import random
from time import time
class A__ :
"""simple docstring"""
def __init__( self : Any ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = {}
def __magic_name__ ( self : Any , A_ : Optional[Any] , A_ : Optional[int] , A_ : str=1 ):
'''simple docstring'''
if self.graph.get(__snake_case ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
_lowerCAmelCase : Dict = [[w, v]]
if not self.graph.get(__snake_case ):
_lowerCAmelCase : List[Any] = []
def __magic_name__ ( self : int ):
'''simple docstring'''
return list(self.graph )
def __magic_name__ ( self : Union[str, Any] , A_ : List[Any] , A_ : Optional[Any] ):
'''simple docstring'''
if self.graph.get(__snake_case ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__snake_case )
def __magic_name__ ( self : Optional[int] , A_ : Tuple=-2 , A_ : int=-1 ):
'''simple docstring'''
if s == d:
return []
_lowerCAmelCase : int = []
_lowerCAmelCase : Tuple = []
if s == -2:
_lowerCAmelCase : str = list(self.graph )[0]
stack.append(__snake_case )
visited.append(__snake_case )
_lowerCAmelCase : Dict = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_lowerCAmelCase : Tuple = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__snake_case )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_lowerCAmelCase : int = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__snake_case ) != 0:
_lowerCAmelCase : List[str] = stack[len(__snake_case ) - 1]
else:
_lowerCAmelCase : Optional[Any] = ss
# check if we have reached the starting point
if len(__snake_case ) == 0:
return visited
def __magic_name__ ( self : Union[str, Any] , A_ : List[Any]=-1 ):
'''simple docstring'''
if c == -1:
_lowerCAmelCase : str = floor(random() * 1_0_0_0_0 ) + 1_0
for i in range(__snake_case ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_0_2 ) + 1 ):
_lowerCAmelCase : Dict = floor(random() * c ) + 1
if n != i:
self.add_pair(__snake_case , __snake_case , 1 )
def __magic_name__ ( self : Optional[Any] , A_ : str=-2 ):
'''simple docstring'''
_lowerCAmelCase : List[str] = deque()
_lowerCAmelCase : List[str] = []
if s == -2:
_lowerCAmelCase : Any = list(self.graph )[0]
d.append(__snake_case )
visited.append(__snake_case )
while d:
_lowerCAmelCase : Optional[int] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def __magic_name__ ( self : str , A_ : str ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def __magic_name__ ( self : int , A_ : List[str] ):
'''simple docstring'''
return len(self.graph[u] )
def __magic_name__ ( self : str , A_ : List[Any]=-2 ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Dict = []
if s == -2:
_lowerCAmelCase : Union[str, Any] = list(self.graph )[0]
stack.append(__snake_case )
visited.append(__snake_case )
_lowerCAmelCase : Optional[int] = s
_lowerCAmelCase : List[Any] = []
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_lowerCAmelCase : Dict = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_lowerCAmelCase : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(__snake_case ) != 0:
_lowerCAmelCase : Optional[int] = stack[len(__snake_case ) - 1]
else:
_lowerCAmelCase : Union[str, Any] = ss
# check if we have reached the starting point
if len(__snake_case ) == 0:
return sorted_nodes
def __magic_name__ ( self : int ):
'''simple docstring'''
_lowerCAmelCase : Any = []
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : Any = list(self.graph )[0]
stack.append(__snake_case )
visited.append(__snake_case )
_lowerCAmelCase : int = -2
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : str = s
_lowerCAmelCase : str = False
_lowerCAmelCase : int = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_lowerCAmelCase : Dict = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_lowerCAmelCase : str = len(__snake_case ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_lowerCAmelCase : Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_lowerCAmelCase : Tuple = True
if len(__snake_case ) != 0:
_lowerCAmelCase : int = stack[len(__snake_case ) - 1]
else:
_lowerCAmelCase : Tuple = False
indirect_parents.append(__snake_case )
_lowerCAmelCase : Tuple = s
_lowerCAmelCase : str = ss
# check if we have reached the starting point
if len(__snake_case ) == 0:
return list(__snake_case )
def __magic_name__ ( self : Optional[Any] ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : str = []
_lowerCAmelCase : List[str] = list(self.graph )[0]
stack.append(__snake_case )
visited.append(__snake_case )
_lowerCAmelCase : List[str] = -2
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : Optional[Any] = s
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : Optional[Any] = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_lowerCAmelCase : str = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_lowerCAmelCase : List[Any] = len(__snake_case ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_lowerCAmelCase : Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_lowerCAmelCase : Tuple = True
if len(__snake_case ) != 0:
_lowerCAmelCase : int = stack[len(__snake_case ) - 1]
else:
_lowerCAmelCase : Tuple = False
indirect_parents.append(__snake_case )
_lowerCAmelCase : int = s
_lowerCAmelCase : Tuple = ss
# check if we have reached the starting point
if len(__snake_case ) == 0:
return False
def __magic_name__ ( self : List[Any] , A_ : List[str]=-2 , A_ : Optional[Any]=-1 ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = time()
self.dfs(__snake_case , __snake_case )
_lowerCAmelCase : Optional[Any] = time()
return end - begin
def __magic_name__ ( self : int , A_ : int=-2 ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = time()
self.bfs(__snake_case )
_lowerCAmelCase : Tuple = time()
return end - begin
class A__ :
"""simple docstring"""
def __init__( self : Union[str, Any] ):
'''simple docstring'''
_lowerCAmelCase : Any = {}
def __magic_name__ ( self : Union[str, Any] , A_ : int , A_ : Dict , A_ : List[str]=1 ):
'''simple docstring'''
if self.graph.get(__snake_case ):
# if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
_lowerCAmelCase : int = [[w, v]]
# add the other way
if self.graph.get(__snake_case ):
# if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if v does not exist
_lowerCAmelCase : str = [[w, u]]
def __magic_name__ ( self : Tuple , A_ : Optional[int] , A_ : Tuple ):
'''simple docstring'''
if self.graph.get(__snake_case ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__snake_case )
# the other way round
if self.graph.get(__snake_case ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(__snake_case )
def __magic_name__ ( self : int , A_ : int=-2 , A_ : List[Any]=-1 ):
'''simple docstring'''
if s == d:
return []
_lowerCAmelCase : int = []
_lowerCAmelCase : str = []
if s == -2:
_lowerCAmelCase : Dict = list(self.graph )[0]
stack.append(__snake_case )
visited.append(__snake_case )
_lowerCAmelCase : Optional[int] = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_lowerCAmelCase : List[str] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__snake_case )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_lowerCAmelCase : Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__snake_case ) != 0:
_lowerCAmelCase : Optional[int] = stack[len(__snake_case ) - 1]
else:
_lowerCAmelCase : Any = ss
# check if we have reached the starting point
if len(__snake_case ) == 0:
return visited
def __magic_name__ ( self : Dict , A_ : List[str]=-1 ):
'''simple docstring'''
if c == -1:
_lowerCAmelCase : str = floor(random() * 1_0_0_0_0 ) + 1_0
for i in range(__snake_case ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_0_2 ) + 1 ):
_lowerCAmelCase : Optional[int] = floor(random() * c ) + 1
if n != i:
self.add_pair(__snake_case , __snake_case , 1 )
def __magic_name__ ( self : int , A_ : str=-2 ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = deque()
_lowerCAmelCase : List[Any] = []
if s == -2:
_lowerCAmelCase : Union[str, Any] = list(self.graph )[0]
d.append(__snake_case )
visited.append(__snake_case )
while d:
_lowerCAmelCase : str = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def __magic_name__ ( self : int , A_ : Any ):
'''simple docstring'''
return len(self.graph[u] )
def __magic_name__ ( self : Tuple ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : Any = list(self.graph )[0]
stack.append(__snake_case )
visited.append(__snake_case )
_lowerCAmelCase : Optional[Any] = -2
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : int = s
_lowerCAmelCase : str = False
_lowerCAmelCase : List[Any] = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_lowerCAmelCase : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_lowerCAmelCase : Tuple = len(__snake_case ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_lowerCAmelCase : Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_lowerCAmelCase : str = True
if len(__snake_case ) != 0:
_lowerCAmelCase : Dict = stack[len(__snake_case ) - 1]
else:
_lowerCAmelCase : Tuple = False
indirect_parents.append(__snake_case )
_lowerCAmelCase : Union[str, Any] = s
_lowerCAmelCase : Any = ss
# check if we have reached the starting point
if len(__snake_case ) == 0:
return list(__snake_case )
def __magic_name__ ( self : Any ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : Any = list(self.graph )[0]
stack.append(__snake_case )
visited.append(__snake_case )
_lowerCAmelCase : Any = -2
_lowerCAmelCase : Dict = []
_lowerCAmelCase : Dict = s
_lowerCAmelCase : Union[str, Any] = False
_lowerCAmelCase : List[Any] = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_lowerCAmelCase : Union[str, Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_lowerCAmelCase : int = len(__snake_case ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_lowerCAmelCase : Dict = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_lowerCAmelCase : Optional[Any] = True
if len(__snake_case ) != 0:
_lowerCAmelCase : Any = stack[len(__snake_case ) - 1]
else:
_lowerCAmelCase : List[Any] = False
indirect_parents.append(__snake_case )
_lowerCAmelCase : Any = s
_lowerCAmelCase : Any = ss
# check if we have reached the starting point
if len(__snake_case ) == 0:
return False
def __magic_name__ ( self : List[Any] ):
'''simple docstring'''
return list(self.graph )
def __magic_name__ ( self : Optional[int] , A_ : Dict=-2 , A_ : int=-1 ):
'''simple docstring'''
_lowerCAmelCase : str = time()
self.dfs(__snake_case , __snake_case )
_lowerCAmelCase : Tuple = time()
return end - begin
def __magic_name__ ( self : List[str] , A_ : Union[str, Any]=-2 ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = time()
self.bfs(__snake_case )
_lowerCAmelCase : Tuple = time()
return end - begin
| 705 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
__UpperCAmelCase = {
'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ErnieForCausalLM',
'ErnieForMaskedLM',
'ErnieForMultipleChoice',
'ErnieForNextSentencePrediction',
'ErnieForPreTraining',
'ErnieForQuestionAnswering',
'ErnieForSequenceClassification',
'ErnieForTokenClassification',
'ErnieModel',
'ErniePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 503 | 0 |
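The graph classes above run DFS and BFS iteratively (explicit stack, deque); since their method names were mangled in this dump, here is a standalone minimal sketch of the same iterative traversals on a plain adjacency-list dict.

from collections import deque

graph = {0: [1, 2], 1: [3], 2: [3], 3: []}  # toy adjacency lists

def dfs(start):
    visited, stack = [], [start]
    while stack:
        node = stack.pop()
        if node not in visited:
            visited.append(node)
            stack.extend(reversed(graph[node]))  # keep left-to-right visit order
    return visited

def bfs(start):
    visited, queue = [start], deque([start])
    while queue:
        node = queue.popleft()
        for nxt in graph[node]:
            if nxt not in visited:
                visited.append(nxt)
                queue.append(nxt)
    return visited

print(dfs(0))  # [0, 1, 3, 2]
print(bfs(0))  # [0, 1, 2, 3]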
def _lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if len(SCREAMING_SNAKE_CASE ) < 2:
return collection
def circle_sort_util(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> bool:
A_ = False
if low == high:
return swapped
A_ = low
A_ = high
while left < right:
if collection[left] > collection[right]:
collection[left], collection[right] = (
collection[right],
collection[left],
)
A_ = True
left += 1
right -= 1
if left == right and collection[left] > collection[right + 1]:
collection[left], collection[right + 1] = (
collection[right + 1],
collection[left],
)
A_ = True
A_ = low + int((high - low) / 2 )
A_ = circle_sort_util(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A_ = circle_sort_util(SCREAMING_SNAKE_CASE , mid + 1 , SCREAMING_SNAKE_CASE )
return swapped or left_swap or right_swap
A_ = True
while is_not_sorted is True:
A_ = circle_sort_util(SCREAMING_SNAKE_CASE , 0 , len(SCREAMING_SNAKE_CASE ) - 1 )
return collection
if __name__ == "__main__":
__lowercase = input("""Enter numbers separated by a comma:\n""").strip()
__lowercase = [int(item) for item in user_input.split(""",""")]
print(circle_sort(unsorted))
| 203 |
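A quick sanity check of circle sort, assuming the circle_sort function above is defined as intended (the inner helper taking collection, low, high).

assert circle_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
assert circle_sort([]) == []
assert circle_sort([7]) == [7]
print("circle_sort OK")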
from itertools import product
def _lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A_ = sides_number
A_ = max_face_number * dice_number
A_ = [0] * (max_total + 1)
A_ = 1
A_ = range(SCREAMING_SNAKE_CASE , max_face_number + 1 )
for dice_numbers in product(SCREAMING_SNAKE_CASE , repeat=SCREAMING_SNAKE_CASE ):
A_ = sum(SCREAMING_SNAKE_CASE )
totals_frequencies[total] += 1
return totals_frequencies
def _lowerCamelCase ( ):
'''simple docstring'''
A_ = total_frequency_distribution(
sides_number=4 , dice_number=9 )
A_ = total_frequency_distribution(
sides_number=6 , dice_number=6 )
A_ = 0
A_ = 9
A_ = 4 * 9
A_ = 6
for peter_total in range(SCREAMING_SNAKE_CASE , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
A_ = (4**9) * (6**6)
A_ = peter_wins_count / total_games_number
A_ = round(SCREAMING_SNAKE_CASE , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f'{solution() = }')
| 203 | 1 |
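A small worked check of the frequency helper above, assuming its intended behaviour (faces run from 1 to sides_number): with two 4-sided dice there are 4**2 = 16 ordered outcomes, and a total of 5 occurs 4 times.

dist = total_frequency_distribution(sides_number=4, dice_number=2)
print(dist)           # [0, 0, 1, 2, 3, 4, 3, 2, 1]
assert sum(dist) == 4**2
assert dist[5] == 4   # (1, 4), (2, 3), (3, 2), (4, 1)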
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
a =logging.get_logger(__name__)
a ={
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
a =[
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ : Any ) -> List[str]:
__lowerCamelCase : Union[str, Any] = {}
with open(lowerCamelCase__ , 'r' ) as file:
for line_number, line in enumerate(lowerCamelCase__ ):
__lowerCamelCase : Optional[int] = line.strip()
if line:
__lowerCamelCase : Tuple = line.split()
__lowerCamelCase : List[str] = line_number
__lowerCamelCase : Optional[Any] = words[0]
__lowerCamelCase : int = value
return result
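# Quick illustration (hypothetical file contents): a label file containing the
# lines "negative" and "positive" maps to {0: 'negative', 1: 'positive'},
# i.e. line number -> first word, later used as the model's id2label mapping.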
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ : Tuple , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : int , lowerCamelCase__ : str ) -> Any:
for attribute in key.split('.' ):
__lowerCamelCase : Optional[Any] = getattr(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : int = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCamelCase__ ):
__lowerCamelCase : int = PARAM_MAPPING[full_name.split('.' )[-1]]
__lowerCamelCase : Any = 'param'
if weight_type is not None and weight_type != "param":
__lowerCamelCase : int = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape
elif weight_type is not None and weight_type == "param":
__lowerCamelCase : Optional[int] = hf_pointer
for attribute in hf_param_name.split('.' ):
__lowerCamelCase : Tuple = getattr(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : List[str] = shape_pointer.shape
# let's reduce dimension
__lowerCamelCase : int = value[0]
else:
__lowerCamelCase : str = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}" )
if weight_type == "weight":
__lowerCamelCase : Any = value
elif weight_type == "weight_g":
__lowerCamelCase : List[Any] = value
elif weight_type == "weight_v":
__lowerCamelCase : Any = value
elif weight_type == "bias":
__lowerCamelCase : Optional[int] = value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
__lowerCamelCase : Union[str, Any] = getattr(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : List[Any] = value
else:
__lowerCamelCase : Any = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Any , lowerCamelCase__ : int ) -> Dict:
__lowerCamelCase : str = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCamelCase__ ):
__lowerCamelCase : Optional[int] = PARAM_MAPPING[full_name.split('.' )[-1]]
__lowerCamelCase : List[str] = 'param'
if weight_type is not None and weight_type != "param":
__lowerCamelCase : List[str] = '.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__lowerCamelCase : List[Any] = '.'.join([key, hf_param_name] )
else:
__lowerCamelCase : Optional[Any] = key
__lowerCamelCase : Any = value if 'lm_head' in full_key else value[0]
a ={
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any]=None , lowerCamelCase__ : Any=None ) -> Any:
__lowerCamelCase : List[str] = False
for key, mapped_key in MAPPING.items():
__lowerCamelCase : Tuple = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__lowerCamelCase : Any = True
if "*" in mapped_key:
__lowerCamelCase : Tuple = name.split(lowerCamelCase__ )[0].split('.' )[-2]
__lowerCamelCase : List[str] = mapped_key.replace('*' , lowerCamelCase__ )
if "weight_g" in name:
__lowerCamelCase : Optional[Any] = 'weight_g'
elif "weight_v" in name:
__lowerCamelCase : Optional[Any] = 'weight_v'
elif "bias" in name:
__lowerCamelCase : str = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowerCamelCase : Optional[Any] = 'weight'
else:
__lowerCamelCase : Optional[Any] = None
if hf_dict is not None:
rename_dict(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
else:
set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
return is_used
return is_used
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : str ) -> Any:
__lowerCamelCase : Union[str, Any] = []
__lowerCamelCase : Tuple = fairseq_model.state_dict()
__lowerCamelCase : str = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__lowerCamelCase : List[str] = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , hf_model.config.feat_extract_norm == 'group' , )
__lowerCamelCase : str = True
else:
__lowerCamelCase : Optional[int] = load_wavaveca_layer(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(F"Unused weights: {unused_weights}" )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : List[str] ) -> Tuple:
__lowerCamelCase : Dict = full_name.split('conv_layers.' )[-1]
__lowerCamelCase : List[Any] = name.split('.' )
__lowerCamelCase : Tuple = int(items[0] )
__lowerCamelCase : List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
__lowerCamelCase : str = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
__lowerCamelCase : Dict = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
__lowerCamelCase : Tuple = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
__lowerCamelCase : Tuple = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(lowerCamelCase__ )
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ : Tuple , lowerCamelCase__ : Tuple , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : Tuple=None , lowerCamelCase__ : str=True , lowerCamelCase__ : Tuple=False ) -> Dict:
if config_path is not None:
__lowerCamelCase : List[Any] = WavaVecaConfig.from_pretrained(lowerCamelCase__ )
else:
__lowerCamelCase : List[str] = WavaVecaConfig()
if is_seq_class:
__lowerCamelCase : Optional[Any] = read_txt_into_dict(lowerCamelCase__ )
__lowerCamelCase : str = idalabel
__lowerCamelCase : Union[str, Any] = WavaVecaForSequenceClassification(lowerCamelCase__ )
__lowerCamelCase : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , )
feature_extractor.save_pretrained(lowerCamelCase__ )
elif is_finetuned:
if dict_path:
__lowerCamelCase : int = Dictionary.load(lowerCamelCase__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__lowerCamelCase : Tuple = target_dict.pad_index
__lowerCamelCase : int = target_dict.bos_index
__lowerCamelCase : Any = target_dict.eos_index
__lowerCamelCase : Optional[Any] = len(target_dict.symbols )
__lowerCamelCase : List[str] = os.path.join(lowerCamelCase__ , 'vocab.json' )
if not os.path.isdir(lowerCamelCase__ ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowerCamelCase__ ) )
return
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
__lowerCamelCase : List[Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
__lowerCamelCase : Any = 0
__lowerCamelCase : Optional[int] = 1
with open(lowerCamelCase__ , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : List[str] = WavaVecaCTCTokenizer(
lowerCamelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowerCamelCase__ , )
__lowerCamelCase : List[Any] = True if config.feat_extract_norm == 'layer' else False
__lowerCamelCase : Dict = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , )
__lowerCamelCase : List[str] = WavaVecaProcessor(feature_extractor=lowerCamelCase__ , tokenizer=lowerCamelCase__ )
processor.save_pretrained(lowerCamelCase__ )
__lowerCamelCase : Dict = WavaVecaForCTC(lowerCamelCase__ )
else:
__lowerCamelCase : str = WavaVecaForPreTraining(lowerCamelCase__ )
if is_finetuned or is_seq_class:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
__lowerCamelCase : Tuple = argparse.Namespace(task='audio_pretraining' )
__lowerCamelCase : Optional[Any] = fairseq.tasks.setup_task(lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCamelCase__ )
__lowerCamelCase : Union[str, Any] = model[0].eval()
recursively_load_weights(lowerCamelCase__ , lowerCamelCase__ , not is_finetuned )
hf_wavavec.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
a =argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
a =parser.parse_args()
a =not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
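# --- Illustrative sketch, not part of the original conversion script above ---
# The script relies on a MAPPING table whose keys may contain a "*" wildcard that
# stands in for a layer index. A minimal, self-contained version of that renaming
# idea (using a regex instead of the script's split-based lookup) could look like
# this; EXAMPLE_MAPPING is a hypothetical entry, not the real wav2vec2 table.
import re

EXAMPLE_MAPPING = {"encoder.layers.*.self_attn.k_proj": "encoder.layers.*.attention.k_proj"}

def rename_with_wildcard(name):
    for key, mapped_key in EXAMPLE_MAPPING.items():
        pattern = re.escape(key).replace(r"\*", r"(\d+)")  # "*" matches a layer number
        match = re.fullmatch(pattern, name)
        if match:
            return mapped_key.replace("*", match.group(1))
    return None  # unmapped names end up reported as unused weights

assert rename_with_wildcard("encoder.layers.3.self_attn.k_proj") == "encoder.layers.3.attention.k_proj"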
| 700 |
a =[
"""Audio""",
"""Array2D""",
"""Array3D""",
"""Array4D""",
"""Array5D""",
"""ClassLabel""",
"""Features""",
"""Sequence""",
"""Value""",
"""Image""",
"""Translation""",
"""TranslationVariableLanguages""",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 337 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=7 , _lowerCamelCase=3 , _lowerCamelCase=1_8 , _lowerCamelCase=3_0 , _lowerCamelCase=4_0_0 , _lowerCamelCase=True , _lowerCamelCase=3_2 , _lowerCamelCase=True , ):
UpperCamelCase_: Any = parent
UpperCamelCase_: Optional[Any] = batch_size
UpperCamelCase_: List[str] = num_channels
UpperCamelCase_: Optional[Any] = image_size
UpperCamelCase_: Optional[int] = min_resolution
UpperCamelCase_: List[str] = max_resolution
UpperCamelCase_: Tuple = do_resize
UpperCamelCase_: Union[str, Any] = size_divisor
UpperCamelCase_: Optional[int] = do_rescale
def _a ( self ):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class _lowerCAmelCase( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
a : str =GLPNImageProcessor if is_vision_available() else None
def _a ( self ):
UpperCamelCase_: Tuple = GLPNImageProcessingTester(self )
@property
def _a ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ):
UpperCamelCase_: List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , 'do_resize' ) )
self.assertTrue(hasattr(_lowerCamelCase , 'size_divisor' ) )
self.assertTrue(hasattr(_lowerCamelCase , 'resample' ) )
self.assertTrue(hasattr(_lowerCamelCase , 'do_rescale' ) )
def _a ( self ):
pass
def _a ( self ):
# Initialize image_processing
UpperCamelCase_: int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase_: List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase_: List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def _a ( self ):
# Initialize image_processing
UpperCamelCase_: Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase_: Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase_: Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def _a ( self ):
# Initialize image_processing
UpperCamelCase_: Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase_: Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase_: str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
| 57 |
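# Sketch of the rounding behaviour asserted in the GLPN tests above (assumption:
# the processor floors each spatial dimension to a multiple of `size_divisor`).
def round_down_to_multiple(dimension, size_divisor):
    return (dimension // size_divisor) * size_divisor

assert round_down_to_multiple(401, 32) == 384
assert round_down_to_multiple(401, 32) % 32 == 0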
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : str = logging.get_logger(__name__)
A_ : Union[str, Any] = {
's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class _lowerCAmelCase( UpperCAmelCase_ ):
"""simple docstring"""
a : Tuple ='''open-llama'''
def __init__( self , _lowerCamelCase=1_0_0_0_0_0 , _lowerCamelCase=4_0_9_6 , _lowerCamelCase=1_1_0_0_8 , _lowerCamelCase=3_2 , _lowerCamelCase=3_2 , _lowerCamelCase="silu" , _lowerCamelCase=2_0_4_8 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1e-6 , _lowerCamelCase=True , _lowerCamelCase=0 , _lowerCamelCase=1 , _lowerCamelCase=2 , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=None , **_lowerCamelCase , ):
UpperCamelCase_: int = vocab_size
UpperCamelCase_: List[Any] = max_position_embeddings
UpperCamelCase_: Dict = hidden_size
UpperCamelCase_: Dict = intermediate_size
UpperCamelCase_: Union[str, Any] = num_hidden_layers
UpperCamelCase_: Dict = num_attention_heads
UpperCamelCase_: Union[str, Any] = hidden_act
UpperCamelCase_: Union[str, Any] = initializer_range
UpperCamelCase_: List[Any] = rms_norm_eps
UpperCamelCase_: Union[str, Any] = use_cache
UpperCamelCase_: Dict = kwargs.pop(
'use_memorry_efficient_attention' , _lowerCamelCase )
UpperCamelCase_: Union[str, Any] = hidden_dropout_prob
UpperCamelCase_: Any = attention_dropout_prob
UpperCamelCase_: int = use_stable_embedding
UpperCamelCase_: Tuple = shared_input_output_embedding
UpperCamelCase_: str = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , tie_word_embeddings=_lowerCamelCase , **_lowerCamelCase , )
def _a ( self ):
if self.rope_scaling is None:
return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                f'''got {self.rope_scaling}''' )
UpperCamelCase_: str = self.rope_scaling.get('type' , _lowerCamelCase )
UpperCamelCase_: int = self.rope_scaling.get('factor' , _lowerCamelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
| 57 | 1 |
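# Sketch of `rope_scaling` values against the validation above (arbitrary example
# values, not recommendations):
valid_rope_scaling = {"type": "linear", "factor": 2.0}    # passes: known type, float factor > 1
invalid_rope_scaling = {"type": "cubic", "factor": 2.0}   # rejected: type not in ["linear", "dynamic"]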
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class __magic_name__ ( lowercase__ , lowercase__ ):
@register_to_config
def __init__( self : Optional[int] , snake_case_ : int = 128 , snake_case_ : int = 256 , snake_case_ : float = 2000.0 , snake_case_ : int = 768 , snake_case_ : int = 12 , snake_case_ : int = 12 , snake_case_ : int = 64 , snake_case_ : int = 2048 , snake_case_ : float = 0.1 , ):
super().__init__()
__snake_case = nn.Sequential(
nn.Linear(snake_case_ , d_model * 4 , bias=snake_case_ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=snake_case_ ) , nn.SiLU() , )
__snake_case = nn.Embedding(snake_case_ , snake_case_ )
__snake_case = False
__snake_case = nn.Linear(snake_case_ , snake_case_ , bias=snake_case_ )
__snake_case = nn.Dropout(p=snake_case_ )
__snake_case = nn.ModuleList()
for lyr_num in range(snake_case_ ):
# FiLM conditional T5 decoder
__snake_case = DecoderLayer(d_model=snake_case_ , d_kv=snake_case_ , num_heads=snake_case_ , d_ff=snake_case_ , dropout_rate=snake_case_ )
self.decoders.append(snake_case_ )
__snake_case = TaLayerNorm(snake_case_ )
__snake_case = nn.Dropout(p=snake_case_ )
__snake_case = nn.Linear(snake_case_ , snake_case_ , bias=snake_case_ )
def lowerCAmelCase ( self : Tuple , snake_case_ : Any , snake_case_ : str ):
__snake_case = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def lowerCAmelCase ( self : int , snake_case_ : Optional[int] , snake_case_ : Tuple , snake_case_ : int ):
__snake_case , __snake_case , __snake_case = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
__snake_case = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
__snake_case = self.conditioning_emb(snake_case_ ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
__snake_case = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
__snake_case = torch.broadcast_to(
torch.arange(snake_case_ , device=decoder_input_tokens.device ) , (batch, seq_length) , )
__snake_case = self.position_encoding(snake_case_ )
__snake_case = self.continuous_inputs_projection(snake_case_ )
inputs += position_encodings
__snake_case = self.dropout(snake_case_ )
# decoder: No padding present.
__snake_case = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
__snake_case = [(x, self.encoder_decoder_mask(snake_case_ , snake_case_ )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
__snake_case = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
__snake_case = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
__snake_case = lyr(
snake_case_ , conditioning_emb=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , )[0]
__snake_case = self.decoder_norm(snake_case_ )
__snake_case = self.post_dropout(snake_case_ )
__snake_case = self.spec_out(snake_case_ )
return spec_out
class __magic_name__ ( nn.Module ):
def __init__( self : List[str] , snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : Dict , snake_case_ : Union[str, Any] , snake_case_ : str , snake_case_ : Optional[Any]=1e-6 ):
super().__init__()
__snake_case = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=snake_case_ , d_kv=snake_case_ , num_heads=snake_case_ , dropout_rate=snake_case_ ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=snake_case_ , d_kv=snake_case_ , num_heads=snake_case_ , dropout_rate=snake_case_ , layer_norm_epsilon=snake_case_ , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=snake_case_ , d_ff=snake_case_ , dropout_rate=snake_case_ , layer_norm_epsilon=snake_case_ ) )
def lowerCAmelCase ( self : Union[str, Any] , snake_case_ : Any , snake_case_ : List[str]=None , snake_case_ : List[str]=None , snake_case_ : int=None , snake_case_ : Dict=None , snake_case_ : Optional[int]=None , ):
__snake_case = self.layer[0](
snake_case_ , conditioning_emb=snake_case_ , attention_mask=snake_case_ , )
if encoder_hidden_states is not None:
__snake_case = torch.where(encoder_attention_mask > 0 , 0 , -1e10 ).to(
encoder_hidden_states.dtype )
__snake_case = self.layer[1](
snake_case_ , key_value_states=snake_case_ , attention_mask=snake_case_ , )
# Apply Film Conditional Feed Forward layer
__snake_case = self.layer[-1](snake_case_ , snake_case_ )
return (hidden_states,)
class __magic_name__ ( nn.Module ):
def __init__( self : Any , snake_case_ : Any , snake_case_ : str , snake_case_ : Optional[int] , snake_case_ : str ):
super().__init__()
__snake_case = TaLayerNorm(snake_case_ )
__snake_case = TaFiLMLayer(in_features=d_model * 4 , out_features=snake_case_ )
__snake_case = Attention(query_dim=snake_case_ , heads=snake_case_ , dim_head=snake_case_ , out_bias=snake_case_ , scale_qk=snake_case_ )
__snake_case = nn.Dropout(snake_case_ )
def lowerCAmelCase ( self : Any , snake_case_ : Optional[Any] , snake_case_ : Optional[Any]=None , snake_case_ : List[Any]=None , ):
# pre_self_attention_layer_norm
__snake_case = self.layer_norm(snake_case_ )
if conditioning_emb is not None:
__snake_case = self.FiLMLayer(snake_case_ , snake_case_ )
# Self-attention block
__snake_case = self.attention(snake_case_ )
__snake_case = hidden_states + self.dropout(snake_case_ )
return hidden_states
class __magic_name__ ( nn.Module ):
def __init__( self : List[Any] , snake_case_ : Optional[Any] , snake_case_ : Optional[int] , snake_case_ : str , snake_case_ : List[str] , snake_case_ : List[Any] ):
super().__init__()
__snake_case = Attention(query_dim=snake_case_ , heads=snake_case_ , dim_head=snake_case_ , out_bias=snake_case_ , scale_qk=snake_case_ )
__snake_case = TaLayerNorm(snake_case_ , eps=snake_case_ )
__snake_case = nn.Dropout(snake_case_ )
def lowerCAmelCase ( self : Union[str, Any] , snake_case_ : str , snake_case_ : str=None , snake_case_ : Optional[int]=None , ):
__snake_case = self.layer_norm(snake_case_ )
__snake_case = self.attention(
snake_case_ , encoder_hidden_states=snake_case_ , attention_mask=attention_mask.squeeze(1 ) , )
__snake_case = hidden_states + self.dropout(snake_case_ )
return layer_output
class __magic_name__ ( nn.Module ):
def __init__( self : Any , snake_case_ : Optional[int] , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] ):
super().__init__()
__snake_case = TaDenseGatedActDense(d_model=snake_case_ , d_ff=snake_case_ , dropout_rate=snake_case_ )
__snake_case = TaFiLMLayer(in_features=d_model * 4 , out_features=snake_case_ )
__snake_case = TaLayerNorm(snake_case_ , eps=snake_case_ )
__snake_case = nn.Dropout(snake_case_ )
def lowerCAmelCase ( self : Optional[Any] , snake_case_ : int , snake_case_ : Optional[int]=None ):
__snake_case = self.layer_norm(snake_case_ )
if conditioning_emb is not None:
__snake_case = self.film(snake_case_ , snake_case_ )
__snake_case = self.DenseReluDense(snake_case_ )
__snake_case = hidden_states + self.dropout(snake_case_ )
return hidden_states
class __magic_name__ ( nn.Module ):
def __init__( self : Any , snake_case_ : Optional[Any] , snake_case_ : List[str] , snake_case_ : Union[str, Any] ):
super().__init__()
__snake_case = nn.Linear(snake_case_ , snake_case_ , bias=snake_case_ )
__snake_case = nn.Linear(snake_case_ , snake_case_ , bias=snake_case_ )
__snake_case = nn.Linear(snake_case_ , snake_case_ , bias=snake_case_ )
__snake_case = nn.Dropout(snake_case_ )
__snake_case = NewGELUActivation()
def lowerCAmelCase ( self : Any , snake_case_ : List[str] ):
        __snake_case = self.act(self.wi_0(snake_case_ ) )
        __snake_case = self.wi_1(snake_case_ )
__snake_case = hidden_gelu * hidden_linear
__snake_case = self.dropout(snake_case_ )
__snake_case = self.wo(snake_case_ )
return hidden_states
class __magic_name__ ( nn.Module ):
def __init__( self : Any , snake_case_ : int , snake_case_ : Tuple=1e-6 ):
super().__init__()
__snake_case = nn.Parameter(torch.ones(snake_case_ ) )
__snake_case = eps
def lowerCAmelCase ( self : List[str] , snake_case_ : List[str] ):
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
__snake_case = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=snake_case_ )
__snake_case = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
__snake_case = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class __magic_name__ ( nn.Module ):
def lowerCAmelCase ( self : str , snake_case_ : torch.Tensor ):
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044715 * torch.pow(snake_case_ , 3.0 )) ))
class __magic_name__ ( nn.Module ):
def __init__( self : int , snake_case_ : Optional[Any] , snake_case_ : Any ):
super().__init__()
__snake_case = nn.Linear(snake_case_ , out_features * 2 , bias=snake_case_ )
def lowerCAmelCase ( self : List[str] , snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] ):
__snake_case = self.scale_bias(snake_case_ )
__snake_case , __snake_case = torch.chunk(snake_case_ , 2 , -1 )
__snake_case = x * (1 + scale) + shift
return x
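# Minimal FiLM sketch (the formula mirrors the scale/shift split in the FiLM layer
# above, written without the learned projection for clarity; shapes are arbitrary).
import torch

def film(x, scale, shift):
    # Feature-wise linear modulation: a per-channel affine transform of the features.
    return x * (1 + scale) + shift

x = torch.randn(2, 4, 8)
assert torch.equal(film(x, torch.zeros(8), torch.zeros(8)), x)  # identity at zero conditioning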
| 720 |
"""simple docstring"""
def temp_input_value( min_val: int = 10 , max_val: int = 10_00 , option: bool = True ) -> int:
    """simple docstring"""
    assert (
        isinstance(min_val , int )
        and isinstance(max_val , int )
        and isinstance(option , bool )
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("min_val must not be greater than max_val" )
    return min_val if option else max_val
def get_avg( number_a: int , number_b: int ) -> int:
    """simple docstring"""
    return int((number_a + number_b) / 2 )
def guess_the_number( lower: int , higher: int , to_guess: int ) -> None:
    """simple docstring"""
    assert (
        isinstance(lower , int ) and isinstance(higher , int ) and isinstance(to_guess , int )
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower must be smaller than higher" )
    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value" )
    def answer(number: int ) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"
    print("started..." )
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest , last_highest )
        last_numbers.append(number )
        if answer(number ) == "low":
            last_lowest = number
        elif answer(number ) == "high":
            last_highest = number
        else:
            break
    print(F'''guess the number : {last_numbers[-1]}''' )
    print(F'''details : {last_numbers!s}''' )
def main() -> None:
    """simple docstring"""
    lower = int(input("Enter lower value : " ).strip() )
    higher = int(input("Enter high value : " ).strip() )
    to_guess = int(input("Enter value to guess : " ).strip() )
    guess_the_number(lower , higher , to_guess )
if __name__ == "__main__":
    main()
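# Standalone sanity sketch of the bisection loop above (re-implemented here rather
# than imported, so it can run on its own): halving the interval finds any target
# in O(log(higher - lower)) guesses.
def count_guesses(lower, higher, to_guess):
    guesses = 0
    while True:
        guesses += 1
        mid = (lower + higher) // 2
        if mid == to_guess:
            return guesses
        if mid < to_guess:
            lower = mid
        else:
            higher = mid

assert count_guesses(0, 1000, 17) <= 10  # log2(1000) is about 10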
| 614 | 0 |
from math import pi
def arc_length( radius: int , angle: int ) -> float:
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
    print(arc_length(9_0, 1_0))
| 67 |
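# Worked check of arc_length above: a 90-degree arc on a circle of radius 10 spans a
# quarter of the full circumference 2*pi*10, i.e. 5*pi, roughly 15.71.
from math import isclose, pi

assert isclose(2 * pi * 10 * (90 / 360), 5 * pi)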
def naive_pattern_search( s: str , pattern: str ) -> list:
    pat_len = len(pattern )
    position = []
    for i in range(len(s ) - pat_len + 1 ):
        match_found = True
        for j in range(pat_len ):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i )
    return position
if __name__ == "__main__":
    assert naive_pattern_search("""ABCDEFG""", """DE""") == [3]
    print(naive_pattern_search("""ABAAABCDBBABCDDEBCABC""", """ABC"""))
| 67 | 1 |
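# Cost sketch for the naive search above: up to len(pattern) comparisons at each of
# len(s) - len(pattern) + 1 start positions, i.e. O(n * m) in the worst case.
def naive_comparisons(s, pattern):
    comparisons = 0
    for i in range(len(s) - len(pattern) + 1):
        for j in range(len(pattern)):
            comparisons += 1
            if s[i + j] != pattern[j]:
                break
    return comparisons

assert naive_comparisons("a" * 20, "a" * 9 + "b") == 11 * 10  # 11 windows, 10 comparisons each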
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq( number: int ) -> bool:
    """simple docstring"""
    sq: int = int(number**0.5 )
    return number == sq * sq
def add_three(
    x_num: int , x_den: int , y_num: int , y_den: int , z_num: int , z_den: int ) -> tuple[int, int]:
    """simple docstring"""
    top: int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom: int = x_den * y_den * z_den
    hcf: int = gcd(top , bottom )
    top //= hcf
    bottom //= hcf
    return top, bottom
def solution( order: int = 35 ) -> int:
    """simple docstring"""
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0 )
    fraction_sum: tuple[int, int]
    for x_num in range(1 , order + 1 ):
        for x_den in range(x_num + 1 , order + 1 ):
            for y_num in range(1 , order + 1 ):
                for y_den in range(y_num + 1 , order + 1 ):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
    for num, den in unique_s:
        total += Fraction(num , den )
    return total.denominator + total.numerator
if __name__ == "__main__":
print(f'''{solution() = }''')
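# Sketch of the reduced-fraction bookkeeping used above: add two fractions exactly,
# then normalise numerator and denominator by their gcd, as add_three does term-wise.
from fractions import Fraction
from math import gcd

x_num, x_den, y_num, y_den = 1, 2, 1, 3
z_num = x_num * y_den + x_den * y_num   # 1*3 + 2*1 = 5
z_den = x_den * y_den                   # 2*3 = 6
hcf = gcd(z_num, z_den)                 # 1, already in lowest terms
assert Fraction(z_num // hcf, z_den // hcf) == Fraction(1, 2) + Fraction(1, 3)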
| 328 |
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase = logging.get_logger(__name__)
set_seed(7_7_0)
__lowerCamelCase = {
'c_attn': 'att_proj',
'c_proj': 'out_proj',
'c_fc': 'in_proj',
'transformer.': '',
'h.': 'layers.',
'ln_1': 'layernorm_1',
'ln_2': 'layernorm_2',
'ln_f': 'layernorm_final',
'wpe': 'position_embeds_layer',
'wte': 'input_embeds_layer',
}
__lowerCamelCase = {
'text_small': {
'repo_id': 'suno/bark',
'file_name': 'text.pt',
},
'coarse_small': {
'repo_id': 'suno/bark',
'file_name': 'coarse.pt',
},
'fine_small': {
'repo_id': 'suno/bark',
'file_name': 'fine.pt',
},
'text': {
'repo_id': 'suno/bark',
'file_name': 'text_2.pt',
},
'coarse': {
'repo_id': 'suno/bark',
'file_name': 'coarse_2.pt',
},
'fine': {
'repo_id': 'suno/bark',
'file_name': 'fine_2.pt',
},
}
__lowerCamelCase = os.path.dirname(os.path.abspath(__file__))
__lowerCamelCase = os.path.join(os.path.expanduser('~'), '.cache')
__lowerCamelCase = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0')
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : str = model_type
if use_small:
key += "_small"
return os.path.join(_SCREAMING_SNAKE_CASE , REMOTE_MODEL_PATHS[key]["file_name"] )
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
hf_hub_download(repo_id=_SCREAMING_SNAKE_CASE , filename=_SCREAMING_SNAKE_CASE , local_dir=_SCREAMING_SNAKE_CASE )
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE="text" ) -> Dict:
"""simple docstring"""
if model_type == "text":
_UpperCAmelCase : int = BarkSemanticModel
_UpperCAmelCase : Dict = BarkSemanticConfig
_UpperCAmelCase : Any = BarkSemanticGenerationConfig
elif model_type == "coarse":
_UpperCAmelCase : Optional[Any] = BarkCoarseModel
_UpperCAmelCase : int = BarkCoarseConfig
_UpperCAmelCase : Any = BarkCoarseGenerationConfig
elif model_type == "fine":
_UpperCAmelCase : Dict = BarkFineModel
_UpperCAmelCase : Optional[Any] = BarkFineConfig
_UpperCAmelCase : List[str] = BarkFineGenerationConfig
else:
raise NotImplementedError()
_UpperCAmelCase : Union[str, Any] = F"""{model_type}_small""" if use_small else model_type
_UpperCAmelCase : str = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(_SCREAMING_SNAKE_CASE ):
logger.info(F"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
_download(model_info["repo_id"] , model_info["file_name"] )
_UpperCAmelCase : int = torch.load(_SCREAMING_SNAKE_CASE , map_location=_SCREAMING_SNAKE_CASE )
# this is a hack
_UpperCAmelCase : Union[str, Any] = checkpoint["model_args"]
if "input_vocab_size" not in model_args:
_UpperCAmelCase : List[Any] = model_args["vocab_size"]
_UpperCAmelCase : Union[str, Any] = model_args["vocab_size"]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
_UpperCAmelCase : str = model_args.pop("n_head" )
_UpperCAmelCase : Optional[int] = model_args.pop("n_embd" )
_UpperCAmelCase : Optional[Any] = model_args.pop("n_layer" )
_UpperCAmelCase : Tuple = ConfigClass(**checkpoint["model_args"] )
_UpperCAmelCase : List[Any] = ModelClass(config=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Optional[int] = GenerationConfigClass()
_UpperCAmelCase : str = model_generation_config
_UpperCAmelCase : Optional[int] = checkpoint["model"]
# fixup checkpoint
_UpperCAmelCase : Union[str, Any] = "_orig_mod."
for k, v in list(state_dict.items() ):
if k.startswith(_SCREAMING_SNAKE_CASE ):
# replace part of the key with corresponding layer name in HF implementation
_UpperCAmelCase : List[Any] = k[len(_SCREAMING_SNAKE_CASE ) :]
for old_layer_name in new_layer_name_dict:
_UpperCAmelCase : Optional[Any] = new_k.replace(_SCREAMING_SNAKE_CASE , new_layer_name_dict[old_layer_name] )
_UpperCAmelCase : str = state_dict.pop(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Union[str, Any] = set(state_dict.keys() ) - set(model.state_dict().keys() )
_UpperCAmelCase : List[Any] = {k for k in extra_keys if not k.endswith(".attn.bias" )}
_UpperCAmelCase : str = set(model.state_dict().keys() ) - set(state_dict.keys() )
_UpperCAmelCase : Optional[int] = {k for k in missing_keys if not k.endswith(".attn.bias" )}
if len(_SCREAMING_SNAKE_CASE ) != 0:
raise ValueError(F"""extra keys found: {extra_keys}""" )
if len(_SCREAMING_SNAKE_CASE ) != 0:
raise ValueError(F"""missing keys: {missing_keys}""" )
model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Optional[Any] = model.num_parameters(exclude_embeddings=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Optional[Any] = checkpoint["best_val_loss"].item()
logger.info(F"""model loaded: {round(n_params/1E6 , 1 )}M params, {round(_SCREAMING_SNAKE_CASE , 3 )} loss""" )
model.eval()
model.to(_SCREAMING_SNAKE_CASE )
del checkpoint, state_dict
return model
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE="text" ) -> Tuple:
"""simple docstring"""
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
_UpperCAmelCase : Optional[Any] = "cpu" # do conversion on cpu
_UpperCAmelCase : List[str] = _get_ckpt_path(_SCREAMING_SNAKE_CASE , use_small=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : List[str] = _load_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , model_type=_SCREAMING_SNAKE_CASE , use_small=_SCREAMING_SNAKE_CASE )
# load bark initial model
_UpperCAmelCase : Union[str, Any] = _bark_load_model(_SCREAMING_SNAKE_CASE , "cpu" , model_type=_SCREAMING_SNAKE_CASE , use_small=_SCREAMING_SNAKE_CASE )
if model_type == "text":
_UpperCAmelCase : Tuple = bark_model["model"]
if model.num_parameters(exclude_embeddings=_SCREAMING_SNAKE_CASE ) != bark_model.get_num_params():
raise ValueError("initial and new models don't have the same number of parameters" )
# check if same output as the bark model
_UpperCAmelCase : Dict = 5
_UpperCAmelCase : List[str] = 1_0
if model_type in ["text", "coarse"]:
_UpperCAmelCase : str = torch.randint(2_5_6 , (batch_size, sequence_length) , dtype=torch.int )
_UpperCAmelCase : Dict = bark_model(_SCREAMING_SNAKE_CASE )[0]
_UpperCAmelCase : int = model(_SCREAMING_SNAKE_CASE )
# take last logits
_UpperCAmelCase : Tuple = output_new_model_total.logits[:, [-1], :]
else:
_UpperCAmelCase : List[Any] = 3
_UpperCAmelCase : Any = 8
_UpperCAmelCase : List[str] = torch.randint(2_5_6 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
_UpperCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Union[str, Any] = bark_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Optional[Any] = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("initial and new outputs don't have the same shape" )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError("initial and new outputs are not equal" )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : List[Any] = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase : str = BarkSemanticConfig.from_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , "config.json" ) )
_UpperCAmelCase : Tuple = BarkCoarseConfig.from_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , "config.json" ) )
_UpperCAmelCase : str = BarkFineConfig.from_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , "config.json" ) )
_UpperCAmelCase : Dict = EncodecConfig.from_pretrained("facebook/encodec_24khz" )
_UpperCAmelCase : List[Any] = BarkSemanticModel.from_pretrained(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : int = BarkCoarseModel.from_pretrained(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : int = BarkFineModel.from_pretrained(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Optional[int] = EncodecModel.from_pretrained("facebook/encodec_24khz" )
_UpperCAmelCase : Dict = BarkConfig.from_sub_model_configs(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase : List[str] = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
_UpperCAmelCase : Any = BarkModel(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : int = semantic
_UpperCAmelCase : Tuple = coarseAcoustic
_UpperCAmelCase : str = fineAcoustic
_UpperCAmelCase : str = codec
_UpperCAmelCase : Optional[Any] = bark_generation_config
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
bark.save_pretrained(_SCREAMING_SNAKE_CASE , repo_id=_SCREAMING_SNAKE_CASE , push_to_hub=_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('model_type', type=str, help='text, coarse or fine.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.')
__lowerCamelCase = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
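# Sketch of the "_orig_mod." fixup performed during checkpoint loading above:
# checkpoints saved from a torch.compile-wrapped module prefix every state-dict
# key, and the prefix must be stripped before load_state_dict. (Toy dict here;
# the key names are illustrative.)
unwanted_prefix = "_orig_mod."
toy_state_dict = {"_orig_mod.input_embeds_layer.weight": 0, "lm_head.weight": 1}
toy_state_dict = {
    (k[len(unwanted_prefix):] if k.startswith(unwanted_prefix) else k): v
    for k, v in toy_state_dict.items()
}
assert toy_state_dict == {"input_embeds_layer.weight": 0, "lm_head.weight": 1}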
| 328 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def a ( snake_case__: Dict ):
'''simple docstring'''
lowercase_ = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
lowercase_ = True if '''large''' in model_name or '''huge''' in model_name else False
lowercase_ = True if '''large''' in model_name or '''huge''' in model_name else False
lowercase_ = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
lowercase_ = [3, 3, 3, 3]
lowercase_ = [5, 5, 5, 5]
elif "fl4" in model_name:
lowercase_ = [4, 4, 4, 4]
lowercase_ = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
lowercase_ = [3, 3, 3, 3]
if "lrf" in model_name:
lowercase_ = [3, 3, 3, 3]
else:
lowercase_ = [2, 2, 2, 2]
if "tiny" in model_name:
lowercase_ = 96
elif "small" in model_name:
lowercase_ = 96
elif "base" in model_name:
lowercase_ = 128
elif "large" in model_name:
lowercase_ = 192
elif "xlarge" in model_name:
lowercase_ = 256
elif "huge" in model_name:
lowercase_ = 352
# set label information
lowercase_ = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
lowercase_ = '''imagenet-22k-id2label.json'''
else:
lowercase_ = '''imagenet-1k-id2label.json'''
lowercase_ = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='''dataset''' ) , '''r''' ) )
    lowercase_ = {int(k ): v for k, v in idalabel.items()}
lowercase_ = {v: k for k, v in idalabel.items()}
lowercase_ = FocalNetConfig(
embed_dim=snake_case__ , depths=snake_case__ , focal_levels=snake_case__ , focal_windows=snake_case__ , use_conv_embed=snake_case__ , idalabel=snake_case__ , labelaid=snake_case__ , use_post_layernorm=snake_case__ , use_layerscale=snake_case__ , )
return config
def a ( snake_case__: Any ):
'''simple docstring'''
if "patch_embed.proj" in name:
lowercase_ = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowercase_ = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
lowercase_ = '''encoder.''' + name
if "encoder.layers" in name:
lowercase_ = name.replace('''encoder.layers''' , '''encoder.stages''' )
if "downsample.proj" in name:
lowercase_ = name.replace('''downsample.proj''' , '''downsample.projection''' )
if "blocks" in name:
lowercase_ = name.replace('''blocks''' , '''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
lowercase_ = name.replace('''modulation.f''' , '''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
lowercase_ = name.replace('''modulation.h''' , '''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
lowercase_ = name.replace('''modulation.proj''' , '''modulation.projection_out''' )
if name == "norm.weight":
lowercase_ = '''layernorm.weight'''
if name == "norm.bias":
lowercase_ = '''layernorm.bias'''
if "head" in name:
lowercase_ = name.replace('''head''' , '''classifier''' )
else:
lowercase_ = '''focalnet.''' + name
return name
def a ( snake_case__: str , snake_case__: str , snake_case__: Any=False ):
'''simple docstring'''
# fmt: off
lowercase_ = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
lowercase_ = model_name_to_url[model_name]
print('''Checkpoint URL: ''' , snake_case__ )
lowercase_ = torch.hub.load_state_dict_from_url(snake_case__ , map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
lowercase_ = state_dict.pop(snake_case__ )
lowercase_ = val
lowercase_ = get_focalnet_config(snake_case__ )
lowercase_ = FocalNetForImageClassification(snake_case__ )
model.eval()
# load state dict
model.load_state_dict(snake_case__ )
# verify conversion
lowercase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase_ = BitImageProcessor(
do_resize=snake_case__ , size={'''shortest_edge''': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=snake_case__ , crop_size=224 , do_normalize=snake_case__ , image_mean=snake_case__ , image_std=snake_case__ , )
lowercase_ = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
lowercase_ = processor(images=snake_case__ , return_tensors='''pt''' )
lowercase_ = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
lowercase_ = image_transforms(snake_case__ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , snake_case__ , atol=1e-4 )
lowercase_ = model(**snake_case__ )
lowercase_ = outputs.logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
print('''First values of logits:''' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowercase_ = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
elif model_name == "focalnet-tiny-lrf":
lowercase_ = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
elif model_name == "focalnet-small":
lowercase_ = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
elif model_name == "focalnet-small-lrf":
lowercase_ = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
elif model_name == "focalnet-base":
lowercase_ = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
elif model_name == "focalnet-base-lrf":
lowercase_ = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
assert torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case__ )
processor.save_pretrained(snake_case__ )
if push_to_hub:
print(F'''Pushing model and processor of {model_name} to the hub...''' )
model.push_to_hub(F'''{model_name}''' )
processor.push_to_hub(F'''{model_name}''' )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
__a = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
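# The logit-verification pattern used above, as a standalone sketch: compare the
# first few logits against frozen expected values with a loose tolerance, then
# check the argmax. (The slice values are the focalnet-tiny numbers from the
# script; the fourth logit is a made-up filler.)
import torch

toy_logits = torch.tensor([[0.2166, -0.4368, 0.2191, 0.0000]])
expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
assert torch.allclose(toy_logits[0, :3], expected_slice, atol=1e-4)
assert toy_logits.argmax(-1).item() == 2  # index of the largest logit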
| 97 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class lowerCamelCase__ ( UpperCAmelCase ):
def __init__( self : Dict , lowercase__ : Dict , lowercase__ : Optional[Any]=13 , lowercase__ : Dict=7 , lowercase__ : Dict=True , lowercase__ : Optional[Any]=True , lowercase__ : Optional[int]=False , lowercase__ : Any=True , lowercase__ : Union[str, Any]=99 , lowercase__ : Optional[int]=32 , lowercase__ : Any=5 , lowercase__ : Any=4 , lowercase__ : List[str]=64 , lowercase__ : Any="gelu" , lowercase__ : Optional[Any]=0.1 , lowercase__ : List[str]=0.1 , lowercase__ : Dict=5_12 , lowercase__ : List[str]=16 , lowercase__ : Union[str, Any]=2 , lowercase__ : str=0.0_2 , lowercase__ : Optional[int]=3 , lowercase__ : Union[str, Any]=4 , lowercase__ : Union[str, Any]=None , lowercase__ : Optional[int]=2 , lowercase__ : Optional[int]=2 , lowercase__ : List[Any]=2 , lowercase__ : Optional[int]=2 , lowercase__ : Union[str, Any]=4 , lowercase__ : Tuple=1 , ):
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = seq_length
_lowerCAmelCase = is_training
_lowerCAmelCase = use_input_mask
_lowerCAmelCase = use_token_type_ids
_lowerCAmelCase = use_labels
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = type_sequence_label_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = num_labels
_lowerCAmelCase = num_choices
_lowerCAmelCase = scope
_lowerCAmelCase = q_groups
_lowerCAmelCase = k_groups
_lowerCAmelCase = v_groups
_lowerCAmelCase = post_attention_groups
_lowerCAmelCase = intermediate_groups
_lowerCAmelCase = output_groups
def SCREAMING_SNAKE_CASE__ ( self : Any ):
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase = None
if self.use_input_mask:
_lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self : int ):
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def SCREAMING_SNAKE_CASE__ ( self : int , lowercase__ : Dict , lowercase__ : Any , lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Any , lowercase__ : Tuple ):
_lowerCAmelCase = SqueezeBertModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
_lowerCAmelCase = model(lowercase__ , lowercase__ )
_lowerCAmelCase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , lowercase__ : Union[str, Any] , lowercase__ : Union[str, Any] , lowercase__ : int , lowercase__ : List[Any] , lowercase__ : Dict , lowercase__ : Union[str, Any] ):
_lowerCAmelCase = SqueezeBertForMaskedLM(config=lowercase__ )
model.to(lowercase__ )
model.eval()
_lowerCAmelCase = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self : int , lowercase__ : Tuple , lowercase__ : Dict , lowercase__ : str , lowercase__ : Dict , lowercase__ : Tuple , lowercase__ : int ):
_lowerCAmelCase = SqueezeBertForQuestionAnswering(config=lowercase__ )
model.to(lowercase__ )
model.eval()
_lowerCAmelCase = model(
lowercase__ , attention_mask=lowercase__ , start_positions=lowercase__ , end_positions=lowercase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self : Dict , lowercase__ : Dict , lowercase__ : Tuple , lowercase__ : Any , lowercase__ : List[str] , lowercase__ : List[Any] , lowercase__ : Tuple ):
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = SqueezeBertForSequenceClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
_lowerCAmelCase = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : str , lowercase__ : Dict , lowercase__ : Tuple , lowercase__ : Any , lowercase__ : Optional[int] ):
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = SqueezeBertForTokenClassification(config=lowercase__ )
model.to(lowercase__ )
model.eval()
_lowerCAmelCase = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , lowercase__ : Optional[int] , lowercase__ : Tuple , lowercase__ : List[Any] , lowercase__ : Optional[int] , lowercase__ : Tuple , lowercase__ : Any ):
_lowerCAmelCase = self.num_choices
_lowerCAmelCase = SqueezeBertForMultipleChoice(config=lowercase__ )
model.to(lowercase__ )
model.eval()
_lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase = model(
lowercase__ , attention_mask=lowercase__ , labels=lowercase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
_lowerCAmelCase = self.prepare_config_and_inputs()
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = config_and_inputs
_lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ):
UpperCamelCase__ =(
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
UpperCamelCase__ =(
{
"feature-extraction": SqueezeBertModel,
"fill-mask": SqueezeBertForMaskedLM,
"question-answering": SqueezeBertForQuestionAnswering,
"text-classification": SqueezeBertForSequenceClassification,
"token-classification": SqueezeBertForTokenClassification,
"zero-shot": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp( self ):
        self.model_tester = SqueezeBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=SqueezeBertConfig , dim=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_squeezebert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
@slow
    def test_inference_classification_head( self ):
        model = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' )
        input_ids = torch.tensor([[1, 2_94_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 13, 15_88, 2]] )
        output = model(input_ids )[0]
        expected_shape = torch.Size((1, 3) )
        self.assertEqual(output.shape , expected_shape )
        expected_tensor = torch.tensor([[0.6_4_0_1, -0.0_3_4_9, -0.6_0_4_1]] )
        self.assertTrue(torch.allclose(output , expected_tensor , atol=1e-4 ) )
| 192 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
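# Turns raw audio into padded (and optionally chunked) `input_values` plus a `padding_mask`;
# the chunking behaviour is controlled by `chunk_length_s` and `overlap` (EnCodec-style extractor).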
class EncodecFeatureExtractor ( SequenceFeatureExtractor ):
    '''simple docstring'''
    model_input_names = ["""input_values""", """padding_mask"""]
    def __init__( self , feature_size : int = 1 , sampling_rate : int = 2_40_00 , padding_value : float = 0.0 , chunk_length_s : float = None , overlap : float = None , **kwargs , ) -> None:
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
@property
    def chunk_length( self ) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
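    # Stride (in samples) between consecutive chunks, derived from the configured overlap fraction.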
@property
    def chunk_stride( self ) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
    def __call__( self , raw_audio : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , padding : Optional[Union[bool, str, PaddingStrategy]] = None , truncation : Optional[bool] = False , max_length : Optional[int] = None , return_tensors : Optional[Union[str, TensorType]] = None , sampling_rate : Optional[int] = None , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
if padding and truncation:
raise ValueError("""Both padding and truncation were set. Make sure you only set one.""" )
elif padding is None:
# by default let's pad the inputs
__lowerCAmelCase = True
        is_batched = bool(
            isinstance(raw_audio , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
        if is_batched:
            raw_audio = [np.asarray(audio , dtype=np.float32 ).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio , np.ndarray ):
            raw_audio = np.asarray(raw_audio , dtype=np.float32 )
        elif isinstance(raw_audio , np.ndarray ) and raw_audio.dtype is np.dtype(np.float64 ):
            raw_audio = raw_audio.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio ).T]
# verify inputs are valid
        for idx, example in enumerate(raw_audio ):
if example.ndim > 2:
raise ValueError(f"""Expected input shape (channels, length) but got shape {example.shape}""" )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(f"""Expected mono audio but example has {example.shape[-1]} channels""" )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(f"""Expected stereo audio but example has {example.shape[-1]} channels""" )
        padded_inputs = None
        input_values = BatchFeature({"""input_values""": raw_audio} )
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            # Chunking: truncation trims to a whole number of chunks based on the shortest
            # input, while padding extends to a whole number of chunks based on the longest.
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio )
                nb_step = int(np.floor(max_length / self.chunk_stride ) )
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio )
                nb_step = int(np.ceil(max_length / self.chunk_stride ) )
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = """max_length"""
        else:
            padded_inputs = input_values
# normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values , max_length=max_length , truncation=truncation , padding=padding , return_attention_mask=padding , )
        if padding:
            padded_inputs["""padding_mask"""] = padded_inputs.pop("""attention_mask""" )
        input_values = []
        for example in padded_inputs.pop("""input_values""" ):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T )
        padded_inputs["""input_values"""] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
| 712 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
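# The appended path makes the `custom_image_processing` fixture importable for the dynamic-module test below.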
_A : Union[str, Any] = get_tests_dir('''fixtures''')
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
    def test_cached_files_are_used_when_internet_is_down( self ):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 5_00
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("""requests.Session.request""" , return_value=response_mock ) as mock_head:
            _ = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
        # This check we did call the fake head request
        mock_head.assert_called()
    def test_legacy_load_from_url( self ):
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            """https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" )
    def test_image_processor_from_subfolder( self ):
        with self.assertRaises(OSError ):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" )
        config = AutoImageProcessor.from_pretrained(
            """hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" )
        self.assertIsNotNone(config )
@is_staging_test
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
    @classmethod
    def setUpClass( cls ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
    @classmethod
    def tearDownClass( cls ):
try:
delete_repo(token=cls._token , repo_id="""test-image-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" )
except HTTPError:
pass
    def test_push_to_hub( self ):
        image_processor = ViTImageProcessor.from_pretrained(_A )
        image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained(f"""{USER}/test-image-processor""" )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="""test-image-processor""" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir , repo_id="""test-image-processor""" , push_to_hub=True , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained(f"""{USER}/test-image-processor""" )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k ) )
    def test_push_to_hub_in_organization( self ):
        image_processor = ViTImageProcessor.from_pretrained(_A )
        image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=True , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k ) )
    def test_push_to_hub_dynamic_image_processor( self ):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(_A )
        image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , )
        new_image_processor = AutoImageProcessor.from_pretrained(
            f"""{USER}/test-dynamic-image-processor""" , trust_remote_code=True )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
| 330 | 0 |
'''simple docstring'''
class Node :
    """simple docstring"""
    def __init__( self , val ):
        '''simple docstring'''
        self.val = val
        self.left = None
        self.right = None
    def insert( self , val ):
        '''simple docstring'''
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val )
                else:
                    self.left.insert(val )
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val )
                else:
                    self.right.insert(val )
        else:
            self.val = val
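# In-order traversal of a binary search tree visits the stored values in ascending order.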
def inorder ( root , res ):
    """simple docstring"""
    if root:
        inorder(root.left , res )
        res.append(root.val )
        inorder(root.right , res )
def tree_sort ( arr ):
    """simple docstring"""
    if len(arr ) == 0:
        return arr
    root = Node(arr[0] )
    for i in range(1 , len(arr ) ):
        root.insert(arr[i] )
    # Traverse BST in order.
    res = []
    inorder(root , res )
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 577 |
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest ( BertTokenizationTest ):
"""simple docstring"""
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
@slow
    def test_sequence_builders( self ):
        '''simple docstring'''
        tokenizer = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" )
        text = tokenizer.encode("""sequence builders""" , add_special_tokens=False )
        text_a = tokenizer.encode("""multi-sequence build""" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 577 | 1 |
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class EfficientFormerConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type = "efficientformer"
    def __init__( self , depths : List[int] = [3, 2, 6, 4] , hidden_sizes : List[int] = [48, 96, 224, 448] , downsamples : List[bool] = [True, True, True, True] , dim : int = 448 , key_dim : int = 32 , attention_ratio : int = 4 , resolution : int = 7 , num_hidden_layers : int = 5 , num_attention_heads : int = 8 , mlp_expansion_ratio : int = 4 , hidden_dropout_prob : float = 0.0 , patch_size : int = 16 , num_channels : int = 3 , pool_size : int = 3 , downsample_patch_size : int = 3 , downsample_stride : int = 2 , downsample_pad : int = 1 , drop_path_rate : float = 0.0 , num_meta3d_blocks : int = 1 , distillation : bool = True , use_layer_scale : bool = True , layer_scale_init_value : float = 1e-5 , hidden_act : str = "gelu" , initializer_range : float = 0.02 , layer_norm_eps : float = 1e-12 , image_size : int = 224 , batch_norm_eps : float = 1e-05 , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 708 |
"""simple docstring"""
import functools
def edit_distance ( word_a : str , word_b : str ) -> int:
    """simple docstring"""
    len_word_a = len(word_a )
    len_word_b = len(word_b )
    # Top-down memoized recursion over a pair of indices into the two words.
    @functools.cache
    def min_distance(index_a : int , index_b : int ) -> int:
        # if first word index is overflow - delete all from the second word
        if index_a >= len_word_a:
            return len_word_b - index_b
        # if second word index is overflow - delete all from the first word
        if index_b >= len_word_b:
            return len_word_a - index_a
        diff = int(word_a[index_a] != word_b[index_b] )  # current letters not identical
        return min(
            1 + min_distance(index_a + 1 , index_b ) , 1 + min_distance(index_a , index_b + 1 ) , diff + min_distance(index_a + 1 , index_b + 1 ) , )
    return min_distance(0 , 0 )
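# Example: edit_distance("kitten", "sitting") == 3 (substitute k->s, substitute e->i, insert g).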
if __name__ == "__main__":
import doctest
doctest.testmod()
| 147 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
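# Standard transformers lazy-import pattern: heavy submodules are loaded only on first attribute access.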
_import_structure = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_dpt'''] = ['''DPTFeatureExtractor''']
    _import_structure['''image_processing_dpt'''] = ['''DPTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_dpt'''] = [
        '''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''DPTForDepthEstimation''',
        '''DPTForSemanticSegmentation''',
        '''DPTModel''',
        '''DPTPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 512 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''unc-nlp/lxmert-base-uncased''': (
'''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''unc-nlp/lxmert-base-uncased''': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'''unc-nlp/lxmert-base-uncased''': {'''do_lower_case''': True},
}
class LxmertTokenizerFast ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        # Rebuild the backend normalizer when the serialized options differ from the requested ones.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        # [CLS] seq_0 [SEP] ( seq_1 [SEP] ) -- BERT-style special-token layout.
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 123 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched ( videos ):
    """simple docstring"""
    # Coerce a single image, a list of frames, or a batch of videos into List[List[frame]].
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(F"""Could not make batched video from {videos}""" )
class a ( BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ["""pixel_values"""]
    def __init__( self , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BILINEAR , do_center_crop : bool = True , crop_size : Dict[str, int] = None , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 2_5_5 , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {'''shortest_edge''': 2_2_4}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
        crop_size = get_size_dict(crop_size , param_name='''crop_size''' )
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BILINEAR , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        '''simple docstring'''
        size = get_size_dict(size , default_to_square=False )
        # "shortest_edge" keeps the aspect ratio; explicit height/width resizes exactly.
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image , size['''shortest_edge'''] , default_to_square=False )
        elif "height" in size and "width" in size:
            output_size = (size['''height'''], size['''width'''])
        else:
            raise ValueError(F"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        '''simple docstring'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
        return center_crop(image , size=(size['''height'''], size['''width''']) , data_format=data_format , **kwargs )
    def rescale( self , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        '''simple docstring'''
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def _preprocess_image( self , image : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_center_crop : bool = None , crop_size : Dict[str, int] = None , do_rescale : bool = None , rescale_factor : float = None , do_normalize : bool = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , data_format : Optional[ChannelDimension] = ChannelDimension.FIRST , ):
        '''simple docstring'''
        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        image = to_numpy_array(image )
        if do_resize:
            image = self.resize(image=image , size=size , resample=resample )
        if do_center_crop:
            image = self.center_crop(image , size=crop_size )
        if do_rescale:
            image = self.rescale(image=image , scale=rescale_factor )
        if do_normalize:
            image = self.normalize(image=image , mean=image_mean , std=image_std )
        image = to_channel_dimension_format(image , data_format )
        return image
    def preprocess( self , videos : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_center_crop : bool = None , crop_size : Dict[str, int] = None , do_rescale : bool = None , rescale_factor : float = None , do_normalize : bool = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs , ):
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='''crop_size''' )
        if not valid_images(videos ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        videos = make_batched(videos )
        videos = [
            [
                self._preprocess_image(
                    image=img , do_resize=do_resize , size=size , resample=resample , do_center_crop=do_center_crop , crop_size=crop_size , do_rescale=do_rescale , rescale_factor=rescale_factor , do_normalize=do_normalize , image_mean=image_mean , image_std=image_std , data_format=data_format , )
                for img in video
            ]
            for video in videos
        ]
        data = {'''pixel_values''': videos}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 502 |
'''simple docstring'''
def lucas_lehmer_test ( p : int ) -> bool:
    """simple docstring"""
    if p < 2:
        raise ValueError('''p should not be less than 2!''' )
    elif p == 2:
        return True
    # Lucas-Lehmer test: with s_0 = 4 and s_{k+1} = (s_k^2 - 2) mod (2^p - 1),
    # the Mersenne number 2^p - 1 is prime iff s_{p-2} == 0.
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
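# 2^7 - 1 = 127 is prime, so lucas_lehmer_test(7) is True; 2^11 - 1 = 2047 = 23 * 89, so lucas_lehmer_test(11) is False.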
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 502 | 1 |
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class SCREAMING_SNAKE_CASE__ ( TestCase ):
    def _no_encoding_on_file_open( self , filepath : str ):
        """simple docstring"""
        with open(filepath , encoding="utf-8" ) as input_file:
            # Match open(...) calls that specify neither a binary/write mode nor an explicit encoding.
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
            input_text = input_file.read()
            match = regexp.search(input_text )
        return match
    def _no_print_statements( self , filepath : str ):
        """simple docstring"""
        with open(filepath , encoding="utf-8" ) as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text )
        matches = [match for match in matches if match is not None and match.group(1 ) is not None]
        return matches[0] if matches else None
    def test_no_encoding_on_file_open( self ):
        """simple docstring"""
        dataset_paths = Path("./datasets" )
        dataset_files = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset ) ):
                raise AssertionError(f'''open(...) must use utf-8 encoding in {dataset}''' )
    def test_no_print_statements( self ):
        """simple docstring"""
        dataset_paths = Path("./datasets" )
        dataset_files = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset ) ):
                raise AssertionError(f'''print statement found in {dataset}. Use datasets.logger/logging instead.''' )
| 69 |
'''simple docstring'''
def naive_pattern_search ( s : str , pattern : str ) -> list:
    pat_len = len(pattern )
    position = []
    # Slide the pattern over the text and compare character by character.
    for i in range(len(s ) - pat_len + 1 ):
        match_found = True
        for j in range(pat_len ):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i )
    return position
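# "DE" begins at index 3 of "ABCDEFG"; the search runs in O(len(s) * len(pattern)) time.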
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
| 69 | 1 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
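# `builtins.open` is patched closest to the test function, so its mock arrives as the first argument.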
@patch('socket.socket' )
@patch('builtins.open' )
def test_send_file_running_as_expected( file : Mock , sock : Mock ) -> None:
    '''simple docstring'''
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None] )
    file.return_value.__enter__.return_value.read.side_effect = lambda _ : next(f )
    # ===== invoke =====
    send_file(filename='mytext.txt' , testing=True )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 676 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
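# Import the real pipelines only when torch and transformers are installed; otherwise fall back to dummy objects.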
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 676 | 1 |