| code (string, lengths 86–54.5k) | code_codestyle (int, 0–371) | style_context (string, lengths 87–49.2k) | style_context_codestyle (int, 0–349) | label (int, 0–1) |
|---|---|---|---|---|
'''simple docstring'''
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`, or its derivative when `deriv` is True.

    >>> sigmoid_function(3.5)
    0.9706877692486436
    >>> sigmoid_function(3.5, True)
    -8.75
    """
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))
# Initial Value
INITIAL_VALUE = 0.02
def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the value found after the forward propagation training.

    >>> res = forward_propagation(32, 450_000)
    >>> res > 31 and res < 33
    True
    """
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
    expected = int(input('Expected value: '))
    number_propagations = int(input('Number of propagations: '))
print(forward_propagation(expected, number_propagations))
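    # Sanity check (editor's addition; the 32 / 450_000 pair is illustrative):
    # after enough propagations, layer_1 * 100 should land within one unit of
    # the expected value.
    result = forward_propagation(32, 450_000)
    assert 31 < result < 33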
| 200 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
    # See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    """Configuration class to store the configuration of a CANINE model."""
    model_type = 'canine'
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=16384, type_vocab_size=16, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0xE000, eos_token_id=0xE001, downsampling_rate=4, upsampling_kernel_size=4, num_hash_functions=8, num_hash_buckets=16384, local_transformer_stride=128, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
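# Usage sketch (editor's addition, assuming the standard transformers config
# API): configs are plain objects, so the defaults above can be inspected and
# overridden directly.
if __name__ == '__main__':
    cfg = CanineConfig(num_hidden_layers=6)
    print(cfg.model_type, cfg.num_hidden_layers, cfg.num_hash_buckets)  # canine 6 16384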
| 200 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class UpperCAmelCase__ ( unittest.TestCase):
def __init__( self , lowercase , lowercase=1_3 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=9_9 , lowercase=3_2 , lowercase=5 , lowercase=4 , lowercase=3_7 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=5_1_2 , lowercase=1_6 , lowercase=2 , lowercase=0.02 , lowercase=4 , ) -> str:
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_attention_mask
__UpperCamelCase = use_token_type_ids
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = type_sequence_label_size
__UpperCamelCase = initializer_range
__UpperCamelCase = num_choices
def __lowerCamelCase ( self ) -> Any:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = None
if self.use_attention_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = None
if self.use_token_type_ids:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __lowerCamelCase ( self ) -> Tuple:
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def __lowerCamelCase ( self ) -> Any:
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = True
__UpperCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class UpperCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase):
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase = FlaxBertModelTester(self )
@slow
def __lowerCamelCase ( self ) -> Any:
# Only check this for base model, not necessary for all model classes.
# This will also help speed-up tests.
__UpperCamelCase = FlaxBertModel.from_pretrained("""bert-base-cased""" )
__UpperCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowercase )
| 243 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class UpperCAmelCase__ ( unittest.TestCase):
def __init__( self , lowercase , lowercase=1_3 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=9_9 , lowercase=3_2 , lowercase=5 , lowercase=4 , lowercase=3_7 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=5_1_2 , lowercase=1_6 , lowercase=2 , lowercase=0.02 , lowercase=4 , ) -> str:
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_attention_mask
__UpperCamelCase = use_token_type_ids
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = type_sequence_label_size
__UpperCamelCase = initializer_range
__UpperCamelCase = num_choices
def __lowerCamelCase ( self ) -> Any:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = None
if self.use_attention_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = None
if self.use_token_type_ids:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __lowerCamelCase ( self ) -> Tuple:
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def __lowerCamelCase ( self ) -> Any:
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = True
__UpperCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class UpperCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase):
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase = FlaxBertModelTester(self )
@slow
def __lowerCamelCase ( self ) -> Any:
# Only check this for base model, not necessary for all model classes.
# This will also help speed-up tests.
__UpperCamelCase = FlaxBertModel.from_pretrained("""bert-base-cased""" )
__UpperCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowercase )
| 243 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = 'realm'
    def __init__(self, vocab_size=30522, hidden_size=768, retriever_proj_size=128, num_hidden_layers=12, num_attention_heads=12, num_candidates=8, intermediate_size=3072, hidden_act='gelu_new', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, span_hidden_size=256, max_span_width=10, reader_layer_norm_eps=1e-3, reader_beam_size=5, reader_seq_len=320, num_block_records=13353718, searcher_beam_size=5000, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
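# Usage sketch (editor's addition): the reconstructed defaults above can be
# instantiated and inspected without downloading any checkpoint.
if __name__ == '__main__':
    cfg = RealmConfig()
    print(cfg.model_type, cfg.num_candidates, cfg.reader_beam_size)  # realm 8 5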
| 73 |
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """Greedy fractional knapsack: values vl, weights wt, capacity w, item count n.

    >>> frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
    240.0
    """
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
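    # Worked example (editor's addition): with capacity 60 every item fits
    # whole, so the k == n branch returns the plain sum 60 + 100 + 120.
    print(frac_knapsack([60, 100, 120], [10, 20, 30], 60, 3))  # 280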
| 73 | 1 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[int]=3 , SCREAMING_SNAKE_CASE : Union[str, Any]=32 , SCREAMING_SNAKE_CASE : int=3 , SCREAMING_SNAKE_CASE : Tuple=10 , SCREAMING_SNAKE_CASE : Optional[Any]=[10, 20, 30, 40] , SCREAMING_SNAKE_CASE : Any=[1, 1, 2, 1] , SCREAMING_SNAKE_CASE : int=True , SCREAMING_SNAKE_CASE : str=True , SCREAMING_SNAKE_CASE : Dict="relu" , SCREAMING_SNAKE_CASE : List[Any]=3 , SCREAMING_SNAKE_CASE : Optional[int]=None , ):
_A : Any = parent
_A : Union[str, Any] = batch_size
_A : str = image_size
_A : Tuple = num_channels
_A : Tuple = embeddings_size
_A : Union[str, Any] = hidden_sizes
_A : Optional[Any] = depths
_A : Union[str, Any] = is_training
_A : int = use_labels
_A : List[str] = hidden_act
_A : Optional[Any] = num_labels
_A : Tuple = scope
_A : List[Any] = len(SCREAMING_SNAKE_CASE)
def A ( self : Optional[int]):
_A : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_A : Optional[int] = self.get_config()
return config, pixel_values
def A ( self : Tuple):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def A ( self : Optional[int] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[str]):
_A : Optional[int] = FlaxRegNetModel(config=SCREAMING_SNAKE_CASE)
_A : int = model(SCREAMING_SNAKE_CASE)
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def A ( self : Tuple , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any]):
_A : List[Any] = self.num_labels
_A : Tuple = FlaxRegNetForImageClassification(config=SCREAMING_SNAKE_CASE)
_A : Optional[Any] = model(SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def A ( self : Union[str, Any]):
_A : List[Any] = self.prepare_config_and_inputs()
_A : Optional[Any] = config_and_inputs
_A : Optional[int] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class __lowerCamelCase ( a_ , unittest.TestCase ):
"""simple docstring"""
a = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
a = False
a = False
a = False
def A ( self : Union[str, Any]):
_A : Optional[Any] = FlaxRegNetModelTester(self)
_A : Dict = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE)
def A ( self : Any):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A ( self : Union[str, Any]):
return
def A ( self : str):
_A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE)
def A ( self : Optional[int]):
_A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE)
@unittest.skip(reason='RegNet does not use inputs_embeds')
def A ( self : List[str]):
pass
@unittest.skip(reason='RegNet does not support input and output embeddings')
def A ( self : Optional[Any]):
pass
def A ( self : Any):
_A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Dict = model_class(SCREAMING_SNAKE_CASE)
_A : str = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : List[str] = [*signature.parameters.keys()]
_A : Optional[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE)
def A ( self : List[Any]):
def check_hidden_states_output(SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[int]):
_A : int = model_class(SCREAMING_SNAKE_CASE)
_A : Optional[int] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE))
_A : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_A : Dict = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE) , expected_num_stages + 1)
_A : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Dict = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : List[str] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
def A ( self : Any):
_A : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
_A : List[str] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
_A : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE)
@jax.jit
def model_jitted(SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Union[str, Any]):
return model(pixel_values=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
with self.subTest('JIT Enabled'):
_A : Union[str, Any] = model_jitted(**SCREAMING_SNAKE_CASE).to_tuple()
with self.subTest('JIT Disabled'):
with jax.disable_jit():
_A : List[Any] = model_jitted(**SCREAMING_SNAKE_CASE).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE) , len(SCREAMING_SNAKE_CASE))
for jitted_output, output in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE):
self.assertEqual(jitted_output.shape , output.shape)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained('facebook/regnet-y-040') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='np')
        outputs = model(**inputs)
        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 360 |
'''simple docstring'''
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        """Return a visual representation of the node and all its following nodes."""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f'{temp.data}')
            temp = temp.next
        return '->'.join(string_rep)


def make_linked_list(elements_list: list):
    """Create a linked list from the given elements and return its head."""
    if not elements_list:
        raise Exception('The Elements List is empty')
    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node) -> None:
    """Print the elements of the given linked list in reverse order."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod
    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print('Linked List:')
    print(linked_list)
    print('Elements in Reverse:')
    print_reverse(linked_list)
if __name__ == "__main__":
main()
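# Editor's addition: the recursive print_reverse above recurses once per node,
# so very long lists can hit Python's default recursion limit (~1000 frames).
# An equivalent iterative sketch:
def print_reverse_iterative(head_node: Node) -> None:
    items = []
    while head_node:
        items.append(head_node.data)
        head_node = head_node.next
    for item in reversed(items):
        print(item)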
| 227 | 0 |
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2
def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """Apply Coulomb's law: given any three of force, charge1, charge2 and
    distance (the remaining one passed as 0), solve for the zero argument."""
    charge_product = abs(charge1 * charge2)
    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if distance < 0:
        raise ValueError('Distance cannot be negative')
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {'force': force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {'charge1': charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {'charge2': charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {'distance': distance}
    raise ValueError('Exactly one argument must be 0')
if __name__ == "__main__":
import doctest
doctest.testmod()
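    # Worked example (editor's addition): solving for the force between 3 C and
    # 5 C charges 2000 m apart: 8.988e9 * |3 * 5| / 2000**2 = 33705.0 N.
    print(coulombs_law(force=0, charge1=3, charge2=5, distance=2000))  # {'force': 33705.0}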
| 295 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve the linear system Ax = b by Gaussian elimination with partial pivoting."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """Return the polynomial of minimal degree that passes through the points (1, y1), (2, y2), ..."""
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int
    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix, vector)
    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )
    return interpolated_func
def question_function(variable: int) -> int:
    """The generating function u(n) as specified in Project Euler problem 101."""
    return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Find the sum of the FITs (first incorrect terms) of the bad optimum polynomials."""
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(f"""{solution() = }""")
| 339 | 0 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k
def _set(k, v):
    return setitem, k, v
def _del(k):
    return delitem, k
def _run_operation(obj, fun, *args):
    """Run fun(obj, *args), returning (result, None) or (None, exception)."""
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
)
_overwrite_items = [
_set("""key_a""", """val_a"""),
_set("""key_a""", """val_b"""),
]
_delete_items = [
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
_del("""key_a"""),
_del("""key_b"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
]
_access_absent_items = [
_get("""key_a"""),
_del("""key_a"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
_del("""key_a"""),
_get("""key_a"""),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
'operations' , (
pytest.param(_add_items , id='add items' ),
pytest.param(_overwrite_items , id='overwrite items' ),
pytest.param(_delete_items , id='delete items' ),
pytest.param(_access_absent_items , id='access absent items' ),
pytest.param(_add_with_resize_up , id='add with resize up' ),
pytest.param(_add_with_resize_down , id='add with resize down' ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my_exc) == str(py_exc)
        assert set(py) == set(my)
        assert len(py) == len(my)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith('_')

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}
    assert dict_public_names > hash_public_names
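# Editor's sketch: each operation above is a (callable, *args) tuple, e.g.
# _set('key_a', 'val_a') expands to setitem(obj, 'key_a', 'val_a'), so the same
# script can be replayed against any mapping:
def _replay_demo():
    target = {}
    for fun, *args in _add_items:
        _run_operation(target, fun, *args)
    return target  # {'key_a': 'val_a', 'key_b': 'val_b'}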
| 366 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase_ ( _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase_ = LEDTokenizer
lowerCAmelCase_ = LEDTokenizerFast
lowerCAmelCase_ = True
def lowerCAmelCase ( self ) -> List[str]:
super().setUp()
_snake_case = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
_snake_case = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_snake_case = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
_snake_case = {'unk_token': '<unk>'}
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(lowerCAmelCase_ ) )
def lowerCAmelCase ( self , **lowerCAmelCase_ ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def lowerCAmelCase ( self , **lowerCAmelCase_ ) -> str:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]:
return "lower newer", "lower newer"
@cached_property
def lowerCAmelCase ( self ) -> Optional[Any]:
return LEDTokenizer.from_pretrained('allenai/led-base-16384' )
@cached_property
def lowerCAmelCase ( self ) -> Union[str, Any]:
return LEDTokenizerFast.from_pretrained('allenai/led-base-16384' )
@require_torch
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_snake_case = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(lowerCAmelCase_ , max_length=len(lowerCAmelCase_ ) , padding=lowerCAmelCase_ , return_tensors='pt' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
_snake_case = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@require_torch
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='pt' )
self.assertIn('input_ids' , lowerCAmelCase_ )
self.assertIn('attention_mask' , lowerCAmelCase_ )
self.assertNotIn('labels' , lowerCAmelCase_ )
self.assertNotIn('decoder_attention_mask' , lowerCAmelCase_ )
@require_torch
def lowerCAmelCase ( self ) -> Optional[int]:
_snake_case = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(text_target=lowerCAmelCase_ , max_length=32 , padding='max_length' , return_tensors='pt' )
self.assertEqual(32 , targets['input_ids'].shape[1] )
@require_torch
def lowerCAmelCase ( self ) -> List[str]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(
['I am a small frog' * 1024, 'I am a small frog'] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='pt' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = ['A long paragraph for summarization.']
_snake_case = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(lowerCAmelCase_ , return_tensors='pt' )
_snake_case = tokenizer(text_target=lowerCAmelCase_ , return_tensors='pt' )
_snake_case = inputs['input_ids']
_snake_case = targets['input_ids']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def lowerCAmelCase ( self ) -> List[str]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = ['Summary of the text.', 'Another summary.']
_snake_case = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_snake_case = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ )
_snake_case = [[0] * len(lowerCAmelCase_ ) for x in encoded_output['input_ids']]
_snake_case = tokenizer.pad(lowerCAmelCase_ )
self.assertSequenceEqual(outputs['global_attention_mask'] , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Tuple:
pass
def lowerCAmelCase ( self ) -> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = 'A, <mask> AllenNLP sentence.'
_snake_case = tokenizer_r.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
_snake_case = tokenizer_p.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
_snake_case = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
_snake_case = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
lowerCAmelCase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 295 | 0 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTeX.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTeX.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset."""
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }), homepage="https://github.com/hendrycks/math", codebase_urls=["https://github.com/hendrycks/math"], )
    def _compute(self, predictions, references):
        n_correct = 0.0
        for pred, ref in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(pred, ref) else 0.0
        accuracy = n_correct / len(predictions)
return {
"accuracy": accuracy,
}
| 1 |
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Given any two of voltage, current and power (the third passed as 0),
    compute the missing quantity from P = V * I."""
    result = namedtuple('result', 'name value')
    if (voltage, current, power).count(0) != 1:
        raise ValueError('Only one argument must be 0')
    elif power < 0:
        raise ValueError(
            'Power cannot be negative in any electrical/electronics system')
    elif voltage == 0:
        return result('voltage', power / current)
    elif current == 0:
        return result('current', power / voltage)
    elif power == 0:
        return result('power', float(round(abs(voltage * current), 2)))
    else:
        raise ValueError('Exactly one argument must be 0')
if __name__ == "__main__":
import doctest
doctest.testmod()
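    # Worked example (editor's addition): with current and power known,
    # V = P / I = 5 / 2 = 2.5.
    print(electric_power(voltage=0, current=2, power=5))  # result(name='voltage', value=2.5)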
| 327 | 0 |
"""simple docstring"""
import operator as op
# NOTE: the original row collapsed every constant below to the single name `A`,
# so each assignment clobbered the previous one. Distinct names are restored
# here following accelerate's constants module; treat them as reconstructions.
SCALER_NAME = 'scaler.pt'
MODEL_NAME = 'pytorch_model'
RNG_STATE_NAME = 'random_states'
OPTIMIZER_NAME = 'optimizer'
SCHEDULER_NAME = 'scheduler'
WEIGHTS_NAME = 'pytorch_model.bin'
WEIGHTS_INDEX_NAME = 'pytorch_model.bin.index.json'
SAFE_WEIGHTS_NAME = 'model.safetensors'
SAFE_WEIGHTS_INDEX_NAME = 'model.safetensors.index.json'
SAGEMAKER_PYTORCH_VERSION = '1.10.2'
SAGEMAKER_PYTHON_VERSION = 'py38'
SAGEMAKER_TRANSFORMERS_VERSION = '4.17.0'
SAGEMAKER_PARALLEL_EC2_INSTANCES = ['ml.p3.16xlarge', 'ml.p3dn.24xlarge', 'ml.p4dn.24xlarge']
FSDP_SHARDING_STRATEGY = ['FULL_SHARD', 'SHARD_GRAD_OP', 'NO_SHARD', 'HYBRID_SHARD', 'HYBRID_SHARD_ZERO2']
FSDP_AUTO_WRAP_POLICY = ['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP', 'NO_WRAP']
FSDP_BACKWARD_PREFETCH = ['BACKWARD_PRE', 'BACKWARD_POST', 'NO_PREFETCH']
FSDP_STATE_DICT_TYPE = ['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT']
FSDP_PYTORCH_VERSION = '2.0.1'
DEEPSPEED_MULTINODE_LAUNCHERS = ['pdsh', 'standard', 'openmpi', 'mvapich']
TORCH_DYNAMO_MODES = ['default', 'reduce-overhead', 'max-autotune']
STR_OPERATION_TO_FUNC = {'>': op.gt, '>=': op.ge, '==': op.eq, '!=': op.ne, '<=': op.le, '<': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
TORCH_LAUNCH_PARAMS = [
    'nnodes',
    'nproc_per_node',
    'rdzv_backend',
    'rdzv_endpoint',
    'rdzv_id',
    'rdzv_conf',
    'standalone',
    'max_restarts',
    'monitor_interval',
    'start_method',
    'role',
    'module',
    'm',
    'no_python',
    'run_path',
    'log_dir',
    'r',
    'redirects',
    't',
    'tee',
    'node_rank',
    'master_addr',
    'master_port',
]
CUDA_DISTRIBUTED_TYPES = ['DEEPSPEED', 'MULTI_GPU', 'FSDP', 'MEGATRON_LM']
DISTRIBUTED_TYPES_WITH_XPU = ['DEEPSPEED', 'MULTI_XPU', 'FSDP']  # name reconstructed by the editor; original identifier lost
| 188 |
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Stub so the type annotations below still resolve without PIL."""
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())  # was `hashlib.mda`, which does not exist
    return m.hexdigest()[:10]
def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else []))
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []))

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            './tests/fixtures/tests_samples/COCO/000000039769.png',
            './tests/fixtures/tests_samples/COCO/000000039769.png',
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass
    @require_tf
    @unittest.skip('Image segmentation not implemented in TF')
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline('mask-generation', model='facebook/sam-vit-huge')
        outputs = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg', points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['masks']):
            new_output += [{'mask': mask_to_test_readable(o), 'scores': outputs['scores'][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0_4_4_4},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0_2_1},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0_1_6_7},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0_1_3_2},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0_0_5_3},
{'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (480, 640)}, '''scores''': 0.9_9_6_7},
{'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (480, 640)}, '''scores''': 0.9_9_3},
{'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (480, 640)}, '''scores''': 0.9_9_0_9},
{'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (480, 640)}, '''scores''': 0.9_8_7_9},
{'''mask''': {'''hash''': '''801064ff79''', '''shape''': (480, 640)}, '''scores''': 0.9_8_3_4},
{'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (480, 640)}, '''scores''': 0.9_7_1_6},
{'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (480, 640)}, '''scores''': 0.9_6_1_2},
{'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (480, 640)}, '''scores''': 0.9_5_9_9},
{'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (480, 640)}, '''scores''': 0.9_5_5_2},
{'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (480, 640)}, '''scores''': 0.9_5_3_2},
{'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (480, 640)}, '''scores''': 0.9_5_1_6},
{'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (480, 640)}, '''scores''': 0.9_4_9_9},
{'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (480, 640)}, '''scores''': 0.9_4_8_3},
{'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (480, 640)}, '''scores''': 0.9_4_6_4},
{'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (480, 640)}, '''scores''': 0.9_4_3},
{'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (480, 640)}, '''scores''': 0.9_4_3},
{'''mask''': {'''hash''': '''c749b25868''', '''shape''': (480, 640)}, '''scores''': 0.9_4_0_8},
{'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (480, 640)}, '''scores''': 0.9_3_3_5},
{'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (480, 640)}, '''scores''': 0.9_3_2_6},
{'''mask''': {'''hash''': '''788b798e24''', '''shape''': (480, 640)}, '''scores''': 0.9_2_6_2},
{'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (480, 640)}, '''scores''': 0.8_9_9_9},
{'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (480, 640)}, '''scores''': 0.8_9_8_6},
{'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (480, 640)}, '''scores''': 0.8_9_8_4},
{'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (480, 640)}, '''scores''': 0.8_8_7_3},
{'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (480, 640)}, '''scores''': 0.8_8_7_1}
] , )
# fmt: on
    @require_torch
    @slow
    def test_threshold(self):
        model_id = 'facebook/sam-vit-huge'
        image_segmenter = pipeline('mask-generation', model=model_id)
        outputs = image_segmenter(
            'http://images.cocodataset.org/val2017/000000039769.jpg', pred_iou_thresh=1, points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['masks']):
            new_output += [{'mask': mask_to_test_readable(o), 'scores': outputs['scores'][i]}]
self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0_4_4_4},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0_2_1_0},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0_1_6_7},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0_1_3_2},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0_0_5_3},
        ], )
| 188 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Evaluate how similar `item` is to the target by counting matching positions."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return ''.join(child_list)
def select(parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str]) -> list[str]:
    """Select the second parent and generate a new population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append(''.join([random.choice(genes) for i in range(len(target))]))
    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(_lowerCamelCase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f"\nGeneration: {generation}"
f"\nTotal Population:{total_population}"
f"\nBest score: {population_score[0][1]}"
f"\nBest string: {population_score[0][0]}" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
            if len(population) > N_POPULATION:
break
if __name__ == "__main__":
    target_str = (
        'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
    )
    genes_list = list(
        ' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
    )
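    # Smaller demo (editor's addition, illustrative values): a short target
    # over a tiny gene pool converges in a handful of generations.
    print(basic('banana', list('ban'), debug=False))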
| 201 | """simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
_a : Dict = datasets.utils.logging.get_logger(__name__)
@dataclass
class __A ( datasets.BuilderConfig ):
_UpperCamelCase : int = 10_000
_UpperCamelCase : Optional[List[str]] = None
_UpperCamelCase : Optional[datasets.Features] = None
class __A ( datasets.ArrowBasedBuilder ):
_UpperCamelCase : List[str] = ParquetConfig
def __A ( self ):
return datasets.DatasetInfo(features=self.config.features )
def __A ( self , a__ ):
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" )
_lowerCAmelCase : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(a__ , (str, list, tuple) ):
_lowerCAmelCase : Any = data_files
if isinstance(a__ , a__ ):
_lowerCAmelCase : Tuple = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : Any = [dl_manager.iter_files(a__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
_lowerCAmelCase : Optional[Any] = []
for split_name, files in data_files.items():
if isinstance(a__ , a__ ):
_lowerCAmelCase : Dict = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : Tuple = [dl_manager.iter_files(a__ ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(a__ ):
with open(a__ , """rb""" ) as f:
_lowerCAmelCase : Optional[Any] = datasets.Features.from_arrow_schema(pq.read_schema(a__ ) )
break
splits.append(datasets.SplitGenerator(name=a__ , gen_kwargs={"""files""": files} ) )
return splits
def __A ( self , a__ ):
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_lowerCAmelCase : Optional[int] = table_cast(a__ , self.info.features.arrow_schema )
return pa_table
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'" )
for file_idx, file in enumerate(itertools.chain.from_iterable(a__ ) ):
with open(a__ , """rb""" ) as f:
_lowerCAmelCase : Tuple = pq.ParquetFile(a__ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
_lowerCAmelCase : Any = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"{file_idx}_{batch_idx}", self._cast_table(a__ )
except ValueError as e:
logger.error(F"Failed to read file '{file}' with error {type(a__ )}: {e}" )
raise
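# Usage sketch (not part of the original row): this builder is what backs
# `load_dataset("parquet", ...)`; the file path below is a placeholder.
from datasets import load_dataset

dataset = load_dataset("parquet", data_files={"train": "data/train.parquet"})
print(dataset["train"].features)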
| 44 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 356 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_different_model_input_name(self):
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    pass
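# Illustrative sketch (not part of the original row): exercising the toy BPE
# vocabulary above outside the unittest harness. The file paths are
# placeholders for the vocab/merges files written in `setUp`.
from transformers import OpenAIGPTTokenizer

tokenizer = OpenAIGPTTokenizer("vocab.json", "merges.txt")
print(tokenizer.tokenize("lower"))  # expected: ['low', 'er</w>']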
| 125 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
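# Usage sketch (assumption, not from the original row): PipelineTool
# instances are callable, so the tool can be used directly; the image file
# name below is a placeholder.
from PIL import Image

tool = DocumentQuestionAnsweringTool()
answer = tool(Image.open("invoice.png"), "What is the total amount?")
print(answer)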
| 38 |
"""simple docstring"""
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
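# Quick check (not part of the original row): the search assumes the input
# list is already sorted in ascending order.
assert binary_search([1, 3, 5, 7, 9], 5) is True
assert binary_search([1, 3, 5, 7, 9], 4) is False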
| 291 | 0 |
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)

    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
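# Quick check (not part of the original row): `sort` runs introsort in place
# and returns the same list; small inputs fall through to insertion sort.
print(sort([4.0, 1.0, 3.0, 2.0]))  # [1.0, 2.0, 3.0, 4.0]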
| 351 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer


MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True


@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples


def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list


@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")

# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)

# Long Form QA with ELI5 and Wikipedia
description = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)

action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True

retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None

# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s

if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            support_doc_dense, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            support_doc_sparse, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    "> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))

disclaimer = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 26 | 0 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        ) | 81 | '''simple docstring'''
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data: int) -> None:
        self.data = data
        self.right: TreeNode | None = None
        self.left: TreeNode | None = None


def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise


def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width=50, char="*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt("Binary Tree Traversals"))

    node = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")

    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")

    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")

    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
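# Non-interactive sketch (not part of the original row): building a small
# tree directly instead of via the `build_tree()` input prompts.
root = TreeNode(1)
root.left, root.right = TreeNode(2), TreeNode(3)
pre_order(root)  # prints: 1,2,3,
print()
in_order(root)   # prints: 2,1,3,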
| 1 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}


if TYPE_CHECKING:
    from .tokenization_bertweet import BertweetTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 139 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 139 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__UpperCAmelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( BaseImageProcessor ):
    model_input_names = ["pixel_values"]
def __init__( self, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = 1 / 255, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> List[str]:
super().__init__(**__a )
UpperCamelCase : Optional[int] = size if size is not None else {"height": 224, "width": 224}
UpperCamelCase : Optional[Any] = get_size_dict(__a )
UpperCamelCase : str = crop_size if crop_size is not None else {"height": 224, "width": 224}
UpperCamelCase : Tuple = get_size_dict(__a, default_to_square=__a, param_name='crop_size' )
UpperCamelCase : Optional[int] = do_resize
UpperCamelCase : Optional[int] = do_rescale
UpperCamelCase : List[str] = do_normalize
UpperCamelCase : int = do_center_crop
UpperCamelCase : int = crop_size
UpperCamelCase : List[Any] = size
UpperCamelCase : Union[str, Any] = resample
UpperCamelCase : Union[str, Any] = rescale_factor
UpperCamelCase : List[str] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCamelCase : int = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = PILImageResampling.BILINEAR, SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> Tuple:
UpperCamelCase : Union[str, Any] = get_size_dict(__a )
if "shortest_edge" in size:
UpperCamelCase : Dict = get_resize_output_image_size(__a, size=size['shortest_edge'], default_to_square=__a )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
UpperCamelCase : int = (size["height"], size["width"])
else:
raise ValueError(F"""Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}""" )
return resize(__a, size=__a, resample=__a, data_format=__a, **__a )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> Any:
UpperCamelCase : Tuple = get_size_dict(__a )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(__a, size=(size['height'], size['width']), data_format=__a, **__a )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_ ) -> int:
return rescale(__a, scale=__a, data_format=__a, **__a )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> Any:
return normalize(__a, mean=__a, std=__a, data_format=__a, **__a )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST, **SCREAMING_SNAKE_CASE_, ) -> Union[str, Any]:
UpperCamelCase : int = do_resize if do_resize is not None else self.do_resize
UpperCamelCase : str = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase : Any = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
UpperCamelCase : Union[str, Any] = get_size_dict(__a, param_name='crop_size', default_to_square=__a )
UpperCamelCase : int = resample if resample is not None else self.resample
UpperCamelCase : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase : str = image_mean if image_mean is not None else self.image_mean
UpperCamelCase : str = image_std if image_std is not None else self.image_std
UpperCamelCase : str = size if size is not None else self.size
UpperCamelCase : Union[str, Any] = get_size_dict(__a )
if not is_batched(__a ):
UpperCamelCase : int = [images]
if not valid_images(__a ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
UpperCamelCase : str = [to_numpy_array(__a ) for image in images]
if do_resize:
UpperCamelCase : Union[str, Any] = [self.resize(image=__a, size=__a, resample=__a ) for image in images]
if do_center_crop:
UpperCamelCase : Optional[int] = [self.center_crop(image=__a, size=__a ) for image in images]
if do_rescale:
UpperCamelCase : Dict = [self.rescale(image=__a, scale=__a ) for image in images]
if do_normalize:
UpperCamelCase : Optional[int] = [self.normalize(image=__a, mean=__a, std=__a ) for image in images]
UpperCamelCase : str = [to_channel_dimension_format(__a, __a ) for image in images]
UpperCamelCase : str = {"pixel_values": images}
return BatchFeature(data=__a, tensor_type=__a )
| 119 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def get_resize_output_image_size(
    input_image: np.ndarray,
    output_size: Union[int, Iterable[int]],
    keep_aspect_ratio: bool,
    multiple: int,
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
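# Worked example (not part of the original row), assuming the reconstructed
# signature above: a channels-first 480x640 image resized towards 384x384
# with keep_aspect_ratio=True snaps to the scale closest to 1 (0.8), so both
# sides land on multiples of 32.
dummy_image = np.zeros((3, 480, 640), dtype=np.float32)
print(get_resize_output_image_size(dummy_image, 384, keep_aspect_ratio=True, multiple=32))  # (384, 512)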
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
| 36 | 0 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
| 357 | '''simple docstring'''
def different_signs(num1: int, num2: int) -> bool:
    # The sign bit of the XOR is set exactly when the operands' signs differ.
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
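# Quick check (not part of the original row): XOR of two ints is negative
# exactly when their sign bits differ.
assert different_signs(1, -1) is True
assert different_signs(-1, -1) is False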
| 345 | 0 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )

    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )

    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )

    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']`."
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
| 313 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
a__ : Any = logging.get_logger(__name__)
a__ : Dict = {
'''openai/imagegpt-small''': '''''',
'''openai/imagegpt-medium''': '''''',
'''openai/imagegpt-large''': '''''',
}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = 'imagegpt'
__SCREAMING_SNAKE_CASE : Optional[Any] = ['past_key_values']
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
    def __init__(self, vocab_size=512 + 1, n_positions=32 * 32, n_embd=512, n_layer=24, n_head=8, n_inner=None, activation_function='quick_gelu', resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, tie_word_embeddings=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('input_ids', {0: 'batch', 1: 'sequence'}),
            ]
        )

    def generate_dummy_inputs(self, preprocessor: 'FeatureExtractionMixin', batch_size: int = 1, seq_length: int = -1, is_pair: bool = False, framework: Optional['TensorType'] = None, num_channels: int = 3, image_width: int = 32, image_height: int = 32) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
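    # A minimal sketch (assumptions: an ImageGPT image processor is available and the
    # checkpoint name below is illustrative, not taken from this document):
    #
    #   from transformers import ImageGPTImageProcessor
    #
    #   onnx_config = ImageGPTOnnxConfig(ImageGPTConfig())
    #   processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
    #   dummy = onnx_config.generate_dummy_inputs(processor, batch_size=2)
    #   # dummy["input_ids"]: color-clustered pixel ids of shape (batch, 32 * 32)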
| 313 | 1 |
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
    default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
    default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
    default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
    default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
    '--preprocessing_num_workers', type=int, default=4, help='Number of processes to use for preprocessing the dataset.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = 'temp_engine/bert-fp32.engine'
if args.fp16:
    engine_name = 'temp_engine/bert-fp16.engine'
if args.int8:
    engine_name = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    """Run one asynchronous TensorRT inference pass and return ((start, end) logits, elapsed seconds)."""
    input_ids = np.asarray(inputs['input_ids'], dtype=np.int32)
    attention_mask = np.asarray(inputs['attention_mask'], dtype=np.int32)
    token_type_ids = np.asarray(inputs['token_type_ids'], dtype=np.int32)
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
column_names = raw_datasets['validation'].column_names
question_column_name = 'question' if 'question' in column_names else column_names[0]
context_column_name = 'context' if 'context' in column_names else column_names[1]
answer_column_name = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    """Tokenize validation examples into model features, keeping overflowing windows."""
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation='only_second' if pad_on_right else 'only_first',
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding='max_length',
    )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop('overflow_to_sample_mapping')
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples['example_id'] = []
    for i in range(len(tokenized_examples['input_ids'])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples['example_id'].append(examples['id'][sample_index])
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples['offset_mapping'][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples['offset_mapping'][i])
        ]
    return tokenized_examples
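# Illustrative sketch (not part of the original script): with max_seq_length=384 and
# doc_stride=128, one long example can produce several overlapping features, e.g.
#
#   tokenized = tokenizer(question, long_context, truncation="only_second", max_length=384,
#                         stride=128, return_overflowing_tokens=True, return_offsets_mapping=True)
#   tokenized["overflow_to_sample_mapping"]  # e.g. [0, 0, 0] -> three windows of example 0
#
# which is why `example_id` is recorded per feature above; the numbers are assumptions
# for illustration only.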
eval_examples = raw_datasets['validation']
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='Running tokenizer on validation dataset',
)
data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage='eval'):
    """Convert raw start/end logits into SQuAD-format text predictions."""
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]
    references = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        """Number of bytes required by one engine binding."""
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffers (pinned host memory for fast async copies)
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(F' Num examples = {len(eval_dataset)}')
logger.info(F' Batch size = {args.per_device_eval_batch_size}')
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
    logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1000 / niter))
    logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1000))
    logger.info('Total Number of Inference = %d', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F'Evaluation metrics: {eval_metric}')
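    # A hypothetical invocation of this script (every path and name below is a
    # placeholder, not taken from the original document):
    #
    #   python evaluate_trt_qa.py \
    #       --onnx_model_path model.onnx \
    #       --output_dir ./out \
    #       --tokenizer_name bert-base-uncased \
    #       --dataset_name squad \
    #       --fp16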
| 146 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert('RGB').resize((64, 64))
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': init_image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png'
        )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png'
        )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench.npy'
        )
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type='np',
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png'
        )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png'
        )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench_fp16.npy'
        )
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type='np',
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png'
        )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png'
        )
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder='scheduler')
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=pndm, torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type='np',
        )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 146 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)
def preprocess(image):
    """Convert a PIL image, or a list of PIL images, into a normalized tensor batch."""
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    image = [trans(img.convert('RGB')) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
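    # Worked example (illustrative numbers): with num_inference_steps=50 and strength=0.4,
    # init_timestep = min(int(50 * 0.4), 50) = 20 and t_start = 50 - 20 = 30, so only the
    # final 20 scheduler timesteps are used; a smaller strength means less noise is added
    # and fewer denoising steps run.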
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )
        init_latents = image.to(device=device, dtype=dtype)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        # 1. Check `strength`
        self.check_inputs(strength)
        # 2. Preprocess image
        image = preprocess(image)
        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)
        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents
        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to eta in the DDIM paper and should be in [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=image)
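# A minimal usage sketch (assumptions: the "google/ddpm-cifar10-32" checkpoint and the
# PIL input are illustrative, and scheduler loading details may differ across
# diffusers versions):
#
#   from diffusers import DDIMScheduler, UNet2DModel
#
#   unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
#   scheduler = DDIMScheduler.from_pretrained("google/ddpm-cifar10-32")
#   pipe = DDIMNoiseComparativeAnalysisPipeline(unet, scheduler)
#   images, noising_timestep = pipe(pil_image, strength=0.5, num_inference_steps=50, return_dict=False)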
| 64 |
def all_capitalized_variants(txt: str) -> list:
    """Return every variant of `txt` with exactly one alphabetic character uppercased.

    >>> all_capitalized_variants("abc")
    ['Abc', 'aBc', 'abC']
    """
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
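# Example (illustrative): non-letter characters are skipped, so
# all_capitalized_variants("a1b") == ['A1b', 'a1B'] -- the digit position yields no variant.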
if __name__ == "__main__":
__import__("doctest").testmod()
| 169 | 0 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> int:
lowerCamelCase__ : Any = abs(__lowerCAmelCase )
lowerCamelCase__ : List[Any] = 0
while n > 0:
res += n % 10
n //= 10
return res
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> int:
lowerCamelCase__ : Optional[Any] = abs(__lowerCAmelCase )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> int:
return sum(int(__lowerCAmelCase ) for c in str(abs(__lowerCAmelCase ) ) )
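# Quick illustrative check (not in the original file): all three implementations agree,
# e.g. sum_of_digits(-12345) == sum_of_digits_recursion(-12345) == sum_of_digits_compact(-12345) == 15.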
def benchmark() -> None:
    """Benchmark the three digit-sum implementations on increasingly large ints."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup='import __main__')
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 361 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {'source': 'What is love ?', 'target': 'life'}
        n_lines = {'train': 12, 'val': 2, 'test': 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = '\n'.join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), 'w') as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, 'output')
        data_dir = os.path.join(tmp_dir, 'data')
        self._create_dummy_data(data_dir=data_dir)
        testargs = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append('--fp16')
        else:
            testargs.append('--gpus=0')
            testargs.append('--distributed_backend=ddp_cpu')
            testargs.append('--num_processes=2')
        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())
        metrics_save_path = os.path.join(output_dir, 'metrics.json')
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result
    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_ray_retriever_single_gpu(self):
        result = self._run_finetune(gpus=1, distributed_retriever='ray')
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_ray_retriever_multi_gpu(self):
        result = self._run_finetune(gpus=1, distributed_retriever='ray')
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)
| 45 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_byt5': ['ByT5Tokenizer']}
if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 243 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xglm'] = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xglm_fast'] = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xglm'] = [
        'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'XGLMForCausalLM',
        'XGLMModel',
        'XGLMPreTrainedModel',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_xglm'] = [
        'FlaxXGLMForCausalLM',
        'FlaxXGLMModel',
        'FlaxXGLMPreTrainedModel',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xglm'] = [
        'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFXGLMForCausalLM',
        'TFXGLMModel',
        'TFXGLMPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 243 | 1 |
"""simple docstring"""
def lowerCAmelCase_( lowercase_ : List[Any] , lowercase_ : List[str] ) -> Dict:
_lowerCamelCase = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def lowerCAmelCase_( lowercase_ : Optional[int] , lowercase_ : List[Any] , lowercase_ : Dict ) -> Optional[Any]:
_lowerCamelCase = 0
while b > 0:
if b & 1:
_lowerCamelCase = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
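# Illustrative check (not part of the original file):
#   binary_multiply(13, 11)        -> 143
#   binary_mod_multiply(13, 11, 7) -> 143 % 7 == 3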
| 73 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb'] = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb_fast'] = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
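# Illustrative note (not part of the original file): with `_LazyModule` installed in
# `sys.modules`, a statement such as
#
#   from transformers.models.nllb import NllbTokenizer
#
# triggers the import of `tokenization_nllb` only at attribute access, keeping the
# top-level `import transformers` cheap.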
| 73 | 1 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily keeps working.
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 103 |
import cmath
import math


def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    """Calculate the apparent power in a single-phase AC circuit.

    >>> apparent_power(100, 5, 0, 0)
    (500+0j)
    """
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)
    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)
    # Calculate apparent power
    return voltage_rect * current_rect
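# Example (illustrative): a 30 degree phase difference yields a complex result,
#   apparent_power(100, 5, 0, -30)  ->  approximately (433.0127 - 250j)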
if __name__ == "__main__":
import doctest
doctest.testmod()
| 227 | 0 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)
class MockDownloadManager:
    dummy_file_name = 'dummy_data'
    datasets_scripts_dir = 'datasets'
    is_streaming = False

    def __init__(self, dataset_name: str, config: str, version: Union[Version, str], cache_dir: Optional[str] = None, use_local_dummy_data: bool = False, load_existing_dummy_data: bool = True, download_callbacks: Optional[List[Callable]] = None):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join('dummy', self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join('dummy', self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, 'dummy_data.zip')

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, '/'))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return '/'.join(self.dummy_file.replace(os.sep, '/').split('/')[:-1])

    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value
        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}', url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed') for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split('/')[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split('/')[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob('*')
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith(('.', '__')):
                yield file_path.relative_to(path).as_posix(), file_path.open('rb')

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith(('.', '__')):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith(('.', '__')):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith(('.', '__')):
                            continue
                        yield os.path.join(dirpath, filename)
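    # A minimal usage sketch (assumptions: the dataset name and URL below are
    # illustrative placeholders, and a `datasets/<name>/dummy/.../dummy_data.zip`
    # layout exists locally):
    #
    #   dl_manager = MockDownloadManager("squad", config=None, version="1.0.0", use_local_dummy_data=True)
    #   paths = dl_manager.download_and_extract({"train": "https://example.com/train.json"})
    #   # -> {"train": "<extracted dummy_data dir>/train.json"}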
| 357 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    """Resize and normalize an image (or list of images) into a [-1, 1] tensor batch."""
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['lanczos']))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical linear interpolation between two (numpy or torch) vectors."""
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1
    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)
    return v2
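# Illustrative property (not in the original file): slerp keeps unit vectors on the
# sphere, e.g. slerp(0.5, np.array([1.0, 0.0]), np.array([0.0, 1.0])) is approximately
# [0.7071, 0.7071] (still unit norm), while plain linear interpolation would give
# [0.5, 0.5] with norm ~0.7071.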
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
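# Math note: for unit vectors u, v separated by angle theta, ||u - v|| = 2*sin(theta/2),
# so the expression above equals 2 * (arcsin(||u - v|| / 2))**2 = theta**2 / 2, i.e. a
# squared geodesic distance on the unit sphere.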
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class A_ ( __UpperCamelCase ):
'''simple docstring'''
def __init__( self: Any , a: AutoencoderKL , a: CLIPTextModel , a: CLIPModel , a: CLIPTokenizer , a: UNetaDConditionModel , a: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , a: CLIPFeatureExtractor , a: Union[str, Any]=None , a: Union[str, Any]=None , a: Union[str, Any]=None , ):
super().__init__()
self.register_modules(
vae=a , text_encoder=a , clip_model=a , tokenizer=a , unet=a , scheduler=a , feature_extractor=a , coca_model=a , coca_tokenizer=a , coca_transform=a , )
__lowerCamelCase : Tuple = (
feature_extractor.size
if isinstance(feature_extractor.size , a )
else feature_extractor.size['shortest_edge']
)
__lowerCamelCase : List[Any] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , a )
set_requires_grad(self.clip_model , a )
def _snake_case ( self: Optional[Any] , a: Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__lowerCamelCase : Any = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(a )
def _snake_case ( self: Dict ):
self.enable_attention_slicing(a )
def _snake_case ( self: Optional[Any] ):
set_requires_grad(self.vae , a )
def _snake_case ( self: List[Any] ):
set_requires_grad(self.vae , a )
def _snake_case ( self: int ):
set_requires_grad(self.unet , a )
def _snake_case ( self: int ):
set_requires_grad(self.unet , a )
def _snake_case ( self: Optional[Any] , a: Union[str, Any] , a: List[str] , a: List[Any] ):
# get the original timestep using init_timestep
__lowerCamelCase : List[Any] = min(int(num_inference_steps * strength ) , a )
__lowerCamelCase : str = max(num_inference_steps - init_timestep , 0 )
__lowerCamelCase : List[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _snake_case ( self: Union[str, Any] , a: Optional[Any] , a: Any , a: Optional[int] , a: Optional[Any] , a: Union[str, Any] , a: List[str]=None ):
if not isinstance(a , torch.Tensor ):
raise ValueError(F'`image` has to be of type `torch.Tensor` but is {type(a )}' )
__lowerCamelCase : Union[str, Any] = image.to(device=a , dtype=a )
if isinstance(a , a ):
__lowerCamelCase : str = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a )
]
__lowerCamelCase : Tuple = torch.cat(a , dim=0 )
else:
__lowerCamelCase : List[Any] = self.vae.encode(a ).latent_dist.sample(a )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
__lowerCamelCase : List[str] = 0.1_8_2_1_5 * init_latents
__lowerCamelCase : Union[str, Any] = init_latents.repeat_interleave(a , dim=0 )
__lowerCamelCase : Optional[int] = randn_tensor(init_latents.shape , generator=a , device=a , dtype=a )
# get latents
__lowerCamelCase : Union[str, Any] = self.scheduler.add_noise(a , a , a )
__lowerCamelCase : int = init_latents
return latents
def _snake_case ( self: Optional[int] , a: Any ):
__lowerCamelCase : List[Any] = self.coca_transform(a ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
__lowerCamelCase : Any = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
__lowerCamelCase : str = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' )
def _snake_case ( self: Any , a: Tuple , a: Tuple ):
__lowerCamelCase : Dict = self.feature_extractor.preprocess(a )
__lowerCamelCase : Dict = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
__lowerCamelCase : List[str] = self.clip_model.get_image_features(a )
__lowerCamelCase : Optional[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=a )
__lowerCamelCase : Tuple = image_embeddings_clip.repeat_interleave(a , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def _snake_case ( self: str , a: str , a: int , a: List[Any] , a: str , a: List[Any] , a: Dict , a: int , ):
__lowerCamelCase : Optional[Any] = latents.detach().requires_grad_()
__lowerCamelCase : str = self.scheduler.scale_model_input(a , a )
# predict the noise residual
__lowerCamelCase : Optional[int] = self.unet(a , a , encoder_hidden_states=a ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
__lowerCamelCase : str = self.scheduler.alphas_cumprod[timestep]
__lowerCamelCase : Dict = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__lowerCamelCase : Optional[int] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
__lowerCamelCase : Optional[int] = torch.sqrt(a )
__lowerCamelCase : int = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , a ):
__lowerCamelCase : str = self.scheduler.sigmas[index]
__lowerCamelCase : List[Any] = latents - sigma * noise_pred
else:
raise ValueError(F'scheduler type {type(self.scheduler )} not supported' )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
__lowerCamelCase : Optional[int] = 1 / 0.1_8_2_1_5 * sample
__lowerCamelCase : Optional[Any] = self.vae.decode(a ).sample
__lowerCamelCase : Tuple = (image / 2 + 0.5).clamp(0 , 1 )
__lowerCamelCase : Any = transforms.Resize(self.feature_extractor_size )(a )
__lowerCamelCase : Union[str, Any] = self.normalize(a ).to(latents.dtype )
__lowerCamelCase : Tuple = self.clip_model.get_image_features(a )
__lowerCamelCase : List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=a )
__lowerCamelCase : List[str] = spherical_dist_loss(a , a ).mean() * clip_guidance_scale
__lowerCamelCase : Tuple = -torch.autograd.grad(a , a )[0]
if isinstance(self.scheduler , a ):
__lowerCamelCase : Optional[int] = latents.detach() + grads * (sigma**2)
__lowerCamelCase : List[Any] = noise_pred_original
else:
__lowerCamelCase : str = noise_pred_original - torch.sqrt(a ) * grads
return noise_pred, latents
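# `spherical_dist_loss` (used above) measures the squared geodesic distance between two
# L2-normalized embeddings; a minimal sketch of the usual implementation (assumes
# `torch.nn.functional as F`; the helper actually imported by this pipeline may differ):
#
#     def spherical_dist_loss(x, y):
#         x = F.normalize(x, dim=-1)
#         y = F.normalize(y, dim=-1)
#         # half-chord length -> arcsin recovers half the great-circle angle
#         return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)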
@torch.no_grad()
def __call__( self: Any , a: Union[torch.FloatTensor, PIL.Image.Image] , a: Union[torch.FloatTensor, PIL.Image.Image] , a: Optional[str] = None , a: Optional[str] = None , a: Optional[int] = 512 , a: Optional[int] = 512 , a: float = 0.6 , a: Optional[int] = 50 , a: Optional[float] = 7.5 , a: Optional[int] = 1 , a: float = 0.0 , a: Optional[float] = 100 , a: Optional[torch.Generator] = None , a: Optional[str] = "pil" , a: bool = True , a: float = 0.8 , a: float = 0.1 , a: float = 0.1 , ):
if isinstance(a , a ) and len(a ) != batch_size:
raise ValueError(F'You have passed {batch_size} batch_size, but only {len(a )} generators.' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if isinstance(a , torch.Generator ) and batch_size > 1:
__lowerCamelCase : List[Any] = [generator] + [None] * (batch_size - 1)
__lowerCamelCase : Dict = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
__lowerCamelCase : Any = [x[0] for x in coca_is_none if x[1]]
__lowerCamelCase : str = ', '.join(a )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(a ):
raise ValueError(
F'Content prompt is None and CoCa [{coca_is_none_str}] is None.'
F' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
__lowerCamelCase : Any = self.get_image_description(a )
if style_prompt is None:
if len(a ):
raise ValueError(
F'Style prompt is None and CoCa [{coca_is_none_str}] is None.'
F' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
__lowerCamelCase : Tuple = self.get_image_description(a )
# get prompt text embeddings for content and style
__lowerCamelCase : int = self.tokenizer(
a , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=a , return_tensors='pt' , )
__lowerCamelCase : Dict = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
__lowerCamelCase : Union[str, Any] = self.tokenizer(
a , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=a , return_tensors='pt' , )
__lowerCamelCase : Any = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
__lowerCamelCase : List[Any] = slerp(a , a , a )
# duplicate text embeddings for each generation per prompt
__lowerCamelCase : Any = text_embeddings.repeat_interleave(a , dim=0 )
# set timesteps
__lowerCamelCase : List[Any] = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
__lowerCamelCase : Union[str, Any] = {}
if accepts_offset:
__lowerCamelCase : Dict = 1
self.scheduler.set_timesteps(a , **a )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to the correct device beforehand;
# Tensor.to() is not in-place, so assign the result back rather than discarding it
self.scheduler.timesteps = self.scheduler.timesteps.to(self.device )
__lowerCamelCase , __lowerCamelCase : Dict = self.get_timesteps(a , a , self.device )
__lowerCamelCase : Tuple = timesteps[:1].repeat(a )
# Preprocess image
__lowerCamelCase : Any = preprocess(a , a , a )
__lowerCamelCase : str = self.prepare_latents(
a , a , a , text_embeddings.dtype , self.device , a )
__lowerCamelCase : Dict = preprocess(a , a , a )
__lowerCamelCase : Optional[int] = self.prepare_latents(
a , a , a , text_embeddings.dtype , self.device , a )
__lowerCamelCase : int = slerp(a , a , a )
if clip_guidance_scale > 0:
__lowerCamelCase : List[str] = self.get_clip_image_embeddings(a , a )
__lowerCamelCase : Union[str, Any] = self.get_clip_image_embeddings(a , a )
__lowerCamelCase : Union[str, Any] = slerp(
a , a , a )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__lowerCamelCase : Tuple = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__lowerCamelCase : Optional[int] = content_text_input.input_ids.shape[-1]
__lowerCamelCase : int = self.tokenizer([''] , padding='max_length' , max_length=a , return_tensors='pt' )
__lowerCamelCase : List[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
__lowerCamelCase : List[Any] = uncond_embeddings.repeat_interleave(a , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowerCamelCase : int = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__lowerCamelCase : str = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
__lowerCamelCase : List[str] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
__lowerCamelCase : Tuple = torch.randn(a , generator=a , device='cpu' , dtype=a ).to(
self.device )
else:
__lowerCamelCase : List[Any] = torch.randn(a , generator=a , device=self.device , dtype=a )
else:
if latents.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
__lowerCamelCase : List[str] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__lowerCamelCase : str = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__lowerCamelCase : int = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__lowerCamelCase : Dict = {}
if accepts_eta:
__lowerCamelCase : List[str] = eta
# check if the scheduler accepts generator
__lowerCamelCase : Optional[int] = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
__lowerCamelCase : Optional[Any] = generator
with self.progress_bar(total=a ):
for i, t in enumerate(a ):
# expand the latents if we are doing classifier free guidance
__lowerCamelCase : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowerCamelCase : Union[str, Any] = self.scheduler.scale_model_input(a , a )
# predict the noise residual
__lowerCamelCase : Tuple = self.unet(a , a , encoder_hidden_states=a ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
__lowerCamelCase , __lowerCamelCase : str = noise_pred.chunk(2 )
__lowerCamelCase : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
__lowerCamelCase : str = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
__lowerCamelCase , __lowerCamelCase : int = self.cond_fn(
a , a , a , a , a , a , a , )
# compute the previous noisy sample x_t -> x_t-1
__lowerCamelCase : Tuple = self.scheduler.step(a , a , a , **a ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__lowerCamelCase : List[Any] = 1 / 0.1_8_2_1_5 * latents
__lowerCamelCase : Union[str, Any] = self.vae.decode(a ).sample
__lowerCamelCase : Optional[int] = (image / 2 + 0.5).clamp(0 , 1 )
__lowerCamelCase : Any = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__lowerCamelCase : Union[str, Any] = self.numpy_to_pil(a )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=a , nsfw_content_detected=a )
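# Toy numeric illustration of the classifier-free-guidance update used in the loop above
# (guidance_scale extrapolates from the unconditional toward the text-conditioned
# prediction; a sketch with arbitrarily chosen values):
#
#     import torch
#     uncond = torch.tensor([0.0, 1.0])
#     text = torch.tensor([1.0, 1.0])
#     guided = uncond + 7.5 * (text - uncond)  # -> tensor([7.5000, 1.0000]); scale 1.0 recovers `text`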
| 194 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
lowercase__ : Dict = logging.get_logger(__name__)
class DeiTFeatureExtractor( DeiTImageProcessor ):
"""simple docstring"""
def __init__( self : Optional[int] , *args : List[Any] , **kwargs : Optional[Any] ) -> None:
'''simple docstring'''
warnings.warn(
'''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DeiTImageProcessor instead.''' , FutureWarning , )
super().__init__(*args , **kwargs )
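# Usage note (a sketch): instantiating the deprecated class still works but emits the
# FutureWarning above; new code should construct the processor directly, e.g.
#
#     from transformers import DeiTImageProcessor
#     processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")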
| 324 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester( unittest.TestCase ):
'''simple docstring'''
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
"""simple docstring"""
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_choices = num_choices
def prepare_config_and_inputs( self ):
"""simple docstring"""
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
attention_mask = None
if self.use_attention_mask:
attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
config = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def prepare_config_and_inputs_for_common( self ):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
config , input_ids , token_type_ids , attention_mask = config_and_inputs
lowerCamelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
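# `ids_tensor` / `random_attention_mask` (imported above) are small test helpers; a
# minimal sketch of the assumed semantics (the real helpers return framework tensors):
#
#     import numpy as np
#
#     def ids_tensor(shape, vocab_size, rng=None):
#         rng = rng or np.random.default_rng(0)
#         return rng.integers(0, vocab_size, size=shape)  # random token ids
#
#     def random_attention_mask(shape, rng=None):
#         mask = ids_tensor(shape, vocab_size=2, rng=rng)
#         mask[:, -1] = 1  # guarantee at least one attended token per row
#         return mask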
@require_flax
class FlaxRoFormerModelTest( FlaxModelTesterMixin , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = True
__UpperCamelCase = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def setUp( self ):
"""simple docstring"""
self.model_tester = FlaxRoFormerModelTester(self )
@slow
def test_model_from_pretrained( self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
model = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=True )
outputs = model(np.ones((1, 1) ) )
self.assertIsNotNone(outputs )
@require_flax
class FlaxRoFormerModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@slow
def test_inference_masked_lm( self ):
"""simple docstring"""
model = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
input_ids = jnp.array([[0, 1, 2, 3, 4, 5]] )
output = model(input_ids )[0]
vocab_size = 50_000
expected_shape = (1, 6, vocab_size)
self.assertEqual(output.shape , expected_shape )
expected_slice = jnp.array(
[[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
| 291 | 0 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__snake_case = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
__snake_case = direct_transformers_import(PATH_TO_TRANSFORMERS)
__snake_case = transformers.models.auto.configuration_auto.CONFIG_MAPPING
__snake_case = {
# used to compute the property `self.chunk_length`
'''EncodecConfig''': ['''overlap'''],
# used as `self.bert_model = BertModel(config, ...)`
'''DPRConfig''': True,
# not used in modeling files, but it's important information
'''FSMTConfig''': ['''langs'''],
# used internally in the configuration class file
'''GPTNeoConfig''': ['''attention_types'''],
# used internally in the configuration class file
'''EsmConfig''': ['''is_folding_model'''],
# used during training (despite we don't have training script for these models yet)
'''Mask2FormerConfig''': ['''ignore_value'''],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'''OneFormerConfig''': ['''ignore_value''', '''norm'''],
# used during preprocessing and collation, see `collating_graphormer.py`
'''GraphormerConfig''': ['''spatial_pos_max'''],
# used internally in the configuration class file
'''T5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'''MT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
'''UMT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
# used internally in the configuration class file
'''LongT5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
'''SwitchTransformersConfig''': ['''feed_forward_proj'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''BioGptConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''GLPNConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''SegformerConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''CvtConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''PerceiverConfig''': ['''layer_norm_eps'''],
# used internally to calculate the feature size
'''InformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''TimeSeriesTransformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''AutoformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate `mlp_dim`
'''SamVisionConfig''': ['''mlp_ratio'''],
# For (head) training, but so far not implemented
'''ClapAudioConfig''': ['''num_classes'''],
# Not used, but providing useful information to users
'''SpeechT5HifiGanConfig''': ['''sampling_rate'''],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'''CLIPSegConfig''': True,
'''DeformableDetrConfig''': True,
'''DetaConfig''': True,
'''DinatConfig''': True,
'''DonutSwinConfig''': True,
'''EfficientFormerConfig''': True,
'''FSMTConfig''': True,
'''JukeboxConfig''': True,
'''LayoutLMv2Config''': True,
'''MaskFormerSwinConfig''': True,
'''MT5Config''': True,
'''NatConfig''': True,
'''OneFormerConfig''': True,
'''PerceiverConfig''': True,
'''RagConfig''': True,
'''SpeechT5Config''': True,
'''SwinConfig''': True,
'''Swin2SRConfig''': True,
'''Swinv2Config''': True,
'''SwitchTransformersConfig''': True,
'''TableTransformerConfig''': True,
'''TapasConfig''': True,
'''TransfoXLConfig''': True,
'''UniSpeechConfig''': True,
'''UniSpeechSatConfig''': True,
'''WavLMConfig''': True,
'''WhisperConfig''': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'''JukeboxPriorConfig''': True,
# TODO: @Younes (for `is_decoder`)
'''Pix2StructTextConfig''': True,
}
)
def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> Any:
'''simple docstring'''
UpperCAmelCase : Tuple =False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
f'''config.{attribute}''' in modeling_source
or f'''getattr(config, "{attribute}"''' in modeling_source
or f'''getattr(self.config, "{attribute}"''' in modeling_source
):
UpperCAmelCase : Optional[Any] =True
# Deal with multi-line cases
elif (
re.search(
Rf'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"''' , __lowerCAmelCase , )
is not None
):
UpperCAmelCase : List[Any] =True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
UpperCAmelCase : Optional[Any] =True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
UpperCAmelCase : List[str] =[
'''bos_index''',
'''eos_index''',
'''pad_index''',
'''unk_index''',
'''mask_index''',
'''image_size''',
'''use_cache''',
'''out_features''',
'''out_indices''',
]
UpperCAmelCase : Optional[int] =['''encoder_no_repeat_ngram_size''']
# Special cases to be allowed
UpperCAmelCase : Tuple =True
if not attribute_used:
UpperCAmelCase : Optional[Any] =False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
UpperCAmelCase : Any =True
elif attribute in ["tie_word_embeddings"] and default_value is False:
UpperCAmelCase : Optional[int] =True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
UpperCAmelCase : List[str] =True
elif attribute.endswith('''_token_id''' ):
UpperCAmelCase : Dict =True
# configuration class specific cases
if not case_allowed:
UpperCAmelCase : Tuple =SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
UpperCAmelCase : Optional[Any] =allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
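# Quick illustration of the multi-line `getattr` pattern matched above (a sketch):
#
#     import re
#
#     src = 'getattr(\n    self.config,\n    "hidden_size",\n)'
#     pat = r'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"hidden_size"'
#     assert re.search(pat, src) is not None  # the attribute counts as "used"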
def lowerCAmelCase_ ( __lowerCAmelCase )-> str:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] =dict(inspect.signature(config_class.__init__ ).parameters )
UpperCAmelCase : Optional[int] =[x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']]
UpperCAmelCase : List[Any] =[signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
UpperCAmelCase : Tuple ={}
if len(config_class.attribute_map ) > 0:
UpperCAmelCase : List[Any] ={v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
UpperCAmelCase : Dict =inspect.getsourcefile(__lowerCAmelCase )
UpperCAmelCase : int =os.path.dirname(__lowerCAmelCase )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
UpperCAmelCase : List[str] =[os.path.join(__lowerCAmelCase , __lowerCAmelCase ) for fn in os.listdir(__lowerCAmelCase ) if fn.startswith('''modeling_''' )]
# Get the source code strings
UpperCAmelCase : List[Any] =[]
for path in modeling_paths:
if os.path.isfile(__lowerCAmelCase ):
with open(__lowerCAmelCase ) as fp:
modeling_sources.append(fp.read() )
UpperCAmelCase : int =[]
for config_param, default_value in zip(__lowerCAmelCase , __lowerCAmelCase ):
# `attributes` here is all the variant names for `config_param`
UpperCAmelCase : Tuple =[config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
unused_attributes.append(attributes[0] )
return sorted(__lowerCAmelCase )
def lowerCAmelCase_ ( )-> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] ={}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
UpperCAmelCase : Tuple =[
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda __lowerCAmelCase : inspect.isclass(__lowerCAmelCase )
and issubclass(__lowerCAmelCase , __lowerCAmelCase )
and inspect.getmodule(__lowerCAmelCase ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
UpperCAmelCase : Dict =check_config_attributes_being_used(__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
UpperCAmelCase : List[str] =unused_attributes
if len(__lowerCAmelCase ) > 0:
UpperCAmelCase : Union[str, Any] ='''The following configuration classes contain unused attributes in the corresponding modeling files:\n'''
for name, attributes in configs_with_unused_attributes.items():
error += f'''{name}: {attributes}\n'''
raise ValueError(__lowerCAmelCase )
if __name__ == "__main__":
check_config_attributes()
| 78 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class EfficientNetConfig( PretrainedConfig ):
model_type = """efficientnet"""
def __init__( self , num_channels = 3 , image_size = 600 , width_coefficient = 2.0 , depth_coefficient = 3.1 , depth_divisor = 8 , kernel_sizes = [3, 3, 5, 3, 5, 5, 3] , in_channels = [32, 16, 24, 40, 80, 112, 192] , out_channels = [16, 24, 40, 80, 112, 192, 320] , depthwise_padding = [] , strides = [1, 2, 2, 2, 1, 2, 1] , num_block_repeats = [1, 2, 2, 3, 3, 4, 1] , expand_ratios = [1, 6, 6, 6, 6, 6, 6] , squeeze_expansion_ratio = 0.25 , hidden_act = "swish" , hidden_dim = 2560 , pooling_type = "mean" , initializer_range = 0.02 , batch_norm_eps = 0.001 , batch_norm_momentum = 0.99 , dropout_rate = 0.5 , drop_connect_rate = 0.2 , **kwargs , ):
'''simple docstring'''
super().__init__(**kwargs )
self.num_channels = num_channels
self.image_size = image_size
self.width_coefficient = width_coefficient
self.depth_coefficient = depth_coefficient
self.depth_divisor = depth_divisor
self.kernel_sizes = kernel_sizes
self.in_channels = in_channels
self.out_channels = out_channels
self.depthwise_padding = depthwise_padding
self.strides = strides
self.num_block_repeats = num_block_repeats
self.expand_ratios = expand_ratios
self.squeeze_expansion_ratio = squeeze_expansion_ratio
self.hidden_act = hidden_act
self.hidden_dim = hidden_dim
self.pooling_type = pooling_type
self.initializer_range = initializer_range
self.batch_norm_eps = batch_norm_eps
self.batch_norm_momentum = batch_norm_momentum
self.dropout_rate = dropout_rate
self.drop_connect_rate = drop_connect_rate
self.num_hidden_layers = sum(num_block_repeats ) * 4
class EfficientNetOnnxConfig( OnnxConfig ):
torch_onnx_minimum_version = version.parse("""1.11""" )
@property
def inputs( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def atol_for_validation( self ) -> float:
'''simple docstring'''
return 1e-5
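# Usage sketch (assumes a transformers checkout where the classes above resolve):
#
#     config = EfficientNetConfig(image_size=600, width_coefficient=2.0)
#     onnx_config = EfficientNetOnnxConfig(config)
#     list(onnx_config.inputs)         # -> ['pixel_values']
#     onnx_config.atol_for_validation  # -> 1e-5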
| 78 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class __magic_name__ ( lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = 'longformer'
def __init__( self, lowercase_ = 512, lowercase_ = 2, lowercase_ = 1, lowercase_ = 0, lowercase_ = 2, lowercase_ = 30522, lowercase_ = 768, lowercase_ = 12, lowercase_ = 12, lowercase_ = 3072, lowercase_ = "gelu", lowercase_ = 0.1, lowercase_ = 0.1, lowercase_ = 512, lowercase_ = 2, lowercase_ = 0.02, lowercase_ = 1E-12, lowercase_ = False, **lowercase_, ) -> Dict:
"""simple docstring"""
super().__init__(pad_token_id=lowercase_, **lowercase_ )
a__ =attention_window
a__ =sep_token_id
a__ =bos_token_id
a__ =eos_token_id
a__ =vocab_size
a__ =hidden_size
a__ =num_hidden_layers
a__ =num_attention_heads
a__ =hidden_act
a__ =intermediate_size
a__ =hidden_dropout_prob
a__ =attention_probs_dropout_prob
a__ =max_position_embeddings
a__ =type_vocab_size
a__ =initializer_range
a__ =layer_norm_eps
a__ =onnx_export
class __magic_name__ ( lowerCamelCase__ ):
'''simple docstring'''
def __init__( self, lowercase_, lowercase_ = "default", lowercase_ = None ) -> Optional[int]:
"""simple docstring"""
super().__init__(lowercase_, lowercase_, lowercase_ )
a__ =True
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
a__ ={0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
a__ ={0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''global_attention_mask''', dynamic_axis),
] )
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
a__ =super().outputs
if self.task == "default":
a__ ={0: '''batch'''}
return outputs
@property
def _UpperCAmelCase ( self ) -> float:
"""simple docstring"""
return 1E-4
@property
def _UpperCAmelCase ( self ) -> int:
"""simple docstring"""
return max(super().default_onnx_opset, 14 )
def _UpperCAmelCase ( self, lowercase_, lowercase_ = -1, lowercase_ = -1, lowercase_ = False, lowercase_ = None, ) -> Mapping[str, Any]:
"""simple docstring"""
a__ =super().generate_dummy_inputs(
preprocessor=lowercase_, batch_size=lowercase_, seq_length=lowercase_, is_pair=lowercase_, framework=lowercase_ )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
a__ =torch.zeros_like(inputs['''input_ids'''] )
# make every second token global
a__ =1
return inputs
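# For reference, the two anonymized assignments above build a zeros mask and then mark
# alternating positions as global, so for seq_length=8 each row of the mask becomes
# [1, 0, 1, 0, 1, 0, 1, 0]. A sketch of the intent:
#
#     inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
#     inputs["global_attention_mask"][:, ::2] = 1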
| 188 |
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 188 | 1 |
def solution( n : int = 100 ) -> int:
sum_of_squares = 0
sum_of_ints = 0
for i in range(1, n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
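# The O(n) loop above also has a closed form (a sketch; both identities are standard):
# sum(1..n) = n(n+1)/2 and sum of squares = n(n+1)(2n+1)/6.
def solution_closed_form(n: int = 100) -> int:
    sum_of_ints = n * (n + 1) // 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_of_ints**2 - sum_of_squares


assert solution_closed_form(10) == solution(10) == 2640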
if __name__ == "__main__":
print(F"""{solution() = }""")
| 207 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : List[str] ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowercase ( self : Union[str, Any] ) -> Any:
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDModel(
sample_size=(3_2, 6_4) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
return model
@property
def lowercase ( self : Optional[Any] ) -> List[Any]:
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDConditionModel(
sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , cross_attention_dim=1_0 , )
return model
@property
def lowercase ( self : Dict ) -> Optional[Any]:
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL(
sample_size=(1_2_8, 6_4) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , )
__lowerCAmelCase = UNetaDModel(
sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
return vqvae, unet
@slow
def lowercase ( self : Dict ) -> Optional[Any]:
__lowerCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
__lowerCAmelCase = DDPMScheduler()
__lowerCAmelCase = AudioDiffusionPipeline(vqvae=lowerCAmelCase_ , unet=self.dummy_unet , mel=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
__lowerCAmelCase = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = torch.Generator(device=lowerCAmelCase_ ).manual_seed(4_2 )
__lowerCAmelCase = pipe(generator=lowerCAmelCase_ , steps=4 )
__lowerCAmelCase = output.audios[0]
__lowerCAmelCase = output.images[0]
__lowerCAmelCase = torch.Generator(device=lowerCAmelCase_ ).manual_seed(4_2 )
__lowerCAmelCase = pipe(generator=lowerCAmelCase_ , steps=4 , return_dict=lowerCAmelCase_ )
__lowerCAmelCase = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
__lowerCAmelCase = np.frombuffer(image.tobytes() , dtype='uint8' )[:1_0]
__lowerCAmelCase = np.frombuffer(image_from_tuple.tobytes() , dtype='uint8' )[:1_0]
__lowerCAmelCase = np.array([6_9, 2_5_5, 2_5_5, 2_5_5, 0, 0, 7_7, 1_8_1, 1_2, 1_2_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
__lowerCAmelCase = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
__lowerCAmelCase = DDIMScheduler()
__lowerCAmelCase = self.dummy_vqvae_and_unet
__lowerCAmelCase = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
__lowerCAmelCase = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
np.random.seed(0 )
__lowerCAmelCase = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
__lowerCAmelCase = torch.Generator(device=lowerCAmelCase_ ).manual_seed(4_2 )
__lowerCAmelCase = pipe(raw_audio=lowerCAmelCase_ , generator=lowerCAmelCase_ , start_step=5 , steps=1_0 )
__lowerCAmelCase = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
__lowerCAmelCase = np.frombuffer(image.tobytes() , dtype='uint8' )[:1_0]
__lowerCAmelCase = np.array([1_2_0, 1_1_7, 1_1_0, 1_0_9, 1_3_8, 1_6_7, 1_3_8, 1_4_8, 1_3_2, 1_2_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
__lowerCAmelCase = self.dummy_unet_condition
__lowerCAmelCase = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=lowerCAmelCase_ , mel=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
__lowerCAmelCase = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
np.random.seed(0 )
__lowerCAmelCase = torch.rand((1, 1, 1_0) )
__lowerCAmelCase = pipe(generator=lowerCAmelCase_ , encoding=lowerCAmelCase_ )
__lowerCAmelCase = output.images[0]
__lowerCAmelCase = np.frombuffer(image.tobytes() , dtype='uint8' )[:1_0]
__lowerCAmelCase = np.array([1_0_7, 1_0_3, 1_2_0, 1_2_7, 1_4_2, 1_2_2, 1_1_3, 1_2_2, 9_7, 1_1_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : List[Any] ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self : Union[str, Any] ) -> Optional[int]:
__lowerCAmelCase = torch_device
__lowerCAmelCase = DiffusionPipeline.from_pretrained('teticio/audio-diffusion-ddim-256' )
__lowerCAmelCase = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = torch.Generator(device=lowerCAmelCase_ ).manual_seed(4_2 )
__lowerCAmelCase = pipe(generator=lowerCAmelCase_ )
__lowerCAmelCase = output.audios[0]
__lowerCAmelCase = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
__lowerCAmelCase = np.frombuffer(image.tobytes() , dtype='uint8' )[:1_0]
__lowerCAmelCase = np.array([1_5_1, 1_6_7, 1_5_4, 1_4_4, 1_2_2, 1_3_4, 1_2_1, 1_0_5, 7_0, 2_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
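# Sketch of the pixel-comparison idiom used throughout these tests: PIL image bytes are
# read back as uint8 and a short prefix is compared elementwise (assumes Pillow + numpy):
#
#     import numpy as np
#     from PIL import Image
#
#     img = Image.new("L", (4, 4), color=128)        # flat gray test image
#     image_slice = np.frombuffer(img.tobytes(), dtype="uint8")[:10]
#     assert np.abs(image_slice - 128).max() == 0    # all sampled pixels match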
| 207 | 1 |
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 5_0000
SMALL_TEST = 5000
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> str:
for i in range(UpperCAmelCase ):
snake_case_ = dataset[i]
@get_duration
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Tuple:
for i in range(0 , len(UpperCAmelCase ) , UpperCAmelCase ):
snake_case_ = dataset[i : i + batch_size]
@get_duration
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
with dataset.formatted_as(type=UpperCAmelCase ):
for i in range(UpperCAmelCase ):
snake_case_ = dataset[i]
@get_duration
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int:
with dataset.formatted_as(type=UpperCAmelCase ):
for i in range(0 , UpperCAmelCase , UpperCAmelCase ):
snake_case_ = dataset[i : i + batch_size]
def benchmark_iterating( ) -> Optional[Any]:
times = {'num examples': SPEED_TEST_N_EXAMPLES}
functions = [
(read, {'length': SMALL_TEST}),
(read, {'length': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}),
(read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
(read_formatted, {'type': 'pandas', 'length': SMALL_TEST}),
(read_formatted, {'type': 'torch', 'length': SMALL_TEST}),
(read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}),
]
functions_shuffled = [
(read, {'length': SMALL_TEST}),
(read, {'length': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}),
(read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print('generating dataset' )
features = datasets.Features(
{'list': datasets.Sequence(datasets.Value('float32' ) ), 'numbers': datasets.Value('float32' )} )
dataset = generate_example_dataset(
os.path.join(tmp_dir , 'dataset.arrow' ) , features , num_examples=SPEED_TEST_N_EXAMPLES , seq_shapes={'list': (100,)} , )
print('first set of iterations' )
for func, kwargs in functions:
print(func.__name__ , str(kwargs ) )
times[func.__name__ + ' ' + ' '.join(str(v ) for v in kwargs.values() )] = func(dataset , **kwargs )
print('shuffling dataset' )
dataset = dataset.shuffle()
print('Second set of iterations (after shuffling)' )
for func, kwargs in functions_shuffled:
print('shuffled ' , func.__name__ , str(kwargs ) )
times['shuffled ' + func.__name__ + ' ' + ' '.join(str(v ) for v in kwargs.values() )] = func(
dataset , **kwargs )
with open(RESULTS_FILE_PATH , 'wb' ) as f:
f.write(json.dumps(times ).encode('utf-8' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
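# For reference, `get_duration` (imported from `utils` above) is presumably a timing
# decorator; a minimal sketch of the assumed behavior:
#
#     import time
#     from functools import wraps
#
#     def get_duration(func):
#         @wraps(func)
#         def wrapper(*args, **kwargs):
#             start = time.perf_counter()
#             func(*args, **kwargs)
#             return time.perf_counter() - start  # elapsed seconds become the stored result
#         return wrapper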
| 69 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
snake_case_ : Union[str, Any] = {
"configuration_pix2struct": [
"PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Pix2StructConfig",
"Pix2StructTextConfig",
"Pix2StructVisionConfig",
],
"processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Union[str, Any] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : str = [
"PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Pix2StructPreTrainedModel",
"Pix2StructForConditionalGeneration",
"Pix2StructVisionModel",
"Pix2StructTextModel",
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
snake_case_ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 125 | 0 |
def alternative_string_arrange( first_str : str , second_str : str ) -> str:
first_str_length = len(first_str )
second_str_length = len(second_str )
abs_length = (
first_str_length if first_str_length > second_str_length else second_str_length
)
output_list : list = []
for char_count in range(abs_length ):
if char_count < first_str_length:
output_list.append(first_str[char_count] )
if char_count < second_str_length:
output_list.append(second_str[char_count] )
return "".join(output_list )
if __name__ == "__main__":
print(alternative_string_arrange('''AB''', '''XYZ'''), end=''' ''')
| 361 |
'''simple docstring'''
from __future__ import annotations
from math import gcd
def pollard_rho( num : int , seed : int = 2 , step : int = 1 , attempts : int = 3 , ) -> int | None:
# A value less than 2 can cause an infinite loop in the algorithm.
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(value : int , step : int , modulus : int ) -> int:
return (pow(value , 2 ) + step) % modulus
for _ in range(attempts ):
# These track the position within the cycle detection logic.
tortoise = seed
hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
tortoise = rand_fn(tortoise , step , num )
hare = rand_fn(hare , step , num )
hare = rand_fn(hare , step , num )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
divisor = gcd(hare - tortoise , num )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
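# Worked example (a sketch): 8051 = 83 * 97 is the classic demonstration input for
# Pollard's rho; which nontrivial factor comes back depends on the seed/step schedule.
#
#     >>> pollard_rho(8051) in (83, 97)
#     True
#     >>> pollard_rho(17) is None   # 17 is prime, so every attempt fails
#     True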
if __name__ == "__main__":
import argparse
lowercase__ : str = argparse.ArgumentParser()
parser.add_argument(
'''num''',
type=int,
help='''The value to find a divisor of''',
)
parser.add_argument(
'''--attempts''',
type=int,
default=3,
help='''The number of attempts before giving up''',
)
lowercase__ : Optional[int] = parser.parse_args()
lowercase__ : int = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f"""{args.num} is probably prime""")
else:
lowercase__ : List[str] = args.num // divisor
print(f"""{args.num} = {divisor} * {quotient}""") | 190 | 0 |
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint( checkpoint , config ):
vae_state_dict = checkpoint
new_checkpoint = {}
UpperCAmelCase_ : str = vae_state_dict["""encoder.conv_in.weight"""]
UpperCAmelCase_ : int = vae_state_dict["""encoder.conv_in.bias"""]
UpperCAmelCase_ : Dict = vae_state_dict["""encoder.conv_out.weight"""]
UpperCAmelCase_ : Tuple = vae_state_dict["""encoder.conv_out.bias"""]
UpperCAmelCase_ : List[Any] = vae_state_dict["""encoder.norm_out.weight"""]
UpperCAmelCase_ : Tuple = vae_state_dict["""encoder.norm_out.bias"""]
UpperCAmelCase_ : Optional[int] = vae_state_dict["""decoder.conv_in.weight"""]
UpperCAmelCase_ : Tuple = vae_state_dict["""decoder.conv_in.bias"""]
UpperCAmelCase_ : Union[str, Any] = vae_state_dict["""decoder.conv_out.weight"""]
UpperCAmelCase_ : Tuple = vae_state_dict["""decoder.conv_out.bias"""]
UpperCAmelCase_ : Tuple = vae_state_dict["""decoder.norm_out.weight"""]
UpperCAmelCase_ : List[str] = vae_state_dict["""decoder.norm_out.bias"""]
UpperCAmelCase_ : Optional[int] = vae_state_dict["""quant_conv.weight"""]
UpperCAmelCase_ : List[Any] = vae_state_dict["""quant_conv.bias"""]
UpperCAmelCase_ : Optional[Any] = vae_state_dict["""post_quant_conv.weight"""]
UpperCAmelCase_ : int = vae_state_dict["""post_quant_conv.bias"""]
# Retrieves the keys for the encoder down blocks only
UpperCAmelCase_ : Any = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
UpperCAmelCase_ : Optional[Any] = {
layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(snake_case_ )
}
# Retrieves the keys for the decoder up blocks only
UpperCAmelCase_ : Any = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
UpperCAmelCase_ : int = {
layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(snake_case_ )
}
for i in range(snake_case_ ):
UpperCAmelCase_ : str = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key]
if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
UpperCAmelCase_ : Any = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.weight""" )
UpperCAmelCase_ : List[str] = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.bias""" )
UpperCAmelCase_ : Optional[Any] = renew_vae_resnet_paths(snake_case_ )
UpperCAmelCase_ : str = {"""old""": f"""down.{i}.block""", """new""": f"""down_blocks.{i}.resnets"""}
assign_to_checkpoint(snake_case_, snake_case_, snake_case_, additional_replacements=[meta_path], config=snake_case_ )
UpperCAmelCase_ : List[str] = [key for key in vae_state_dict if """encoder.mid.block""" in key]
UpperCAmelCase_ : Union[str, Any] = 2
for i in range(1, num_mid_res_blocks + 1 ):
UpperCAmelCase_ : str = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key]
UpperCAmelCase_ : str = renew_vae_resnet_paths(snake_case_ )
UpperCAmelCase_ : str = {"""old""": f"""mid.block_{i}""", """new""": f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(snake_case_, snake_case_, snake_case_, additional_replacements=[meta_path], config=snake_case_ )
UpperCAmelCase_ : str = [key for key in vae_state_dict if """encoder.mid.attn""" in key]
UpperCAmelCase_ : int = renew_vae_attention_paths(snake_case_ )
UpperCAmelCase_ : List[str] = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""}
assign_to_checkpoint(snake_case_, snake_case_, snake_case_, additional_replacements=[meta_path], config=snake_case_ )
conv_attn_to_linear(snake_case_ )
for i in range(snake_case_ ):
UpperCAmelCase_ : List[str] = num_up_blocks - 1 - i
UpperCAmelCase_ : List[str] = [
key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key
]
if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
UpperCAmelCase_ : Union[str, Any] = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.weight"""
]
UpperCAmelCase_ : List[str] = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.bias"""
]
UpperCAmelCase_ : str = renew_vae_resnet_paths(snake_case_ )
UpperCAmelCase_ : int = {"""old""": f"""up.{block_id}.block""", """new""": f"""up_blocks.{i}.resnets"""}
assign_to_checkpoint(snake_case_, snake_case_, snake_case_, additional_replacements=[meta_path], config=snake_case_ )
UpperCAmelCase_ : Tuple = [key for key in vae_state_dict if """decoder.mid.block""" in key]
UpperCAmelCase_ : Tuple = 2
for i in range(1, num_mid_res_blocks + 1 ):
UpperCAmelCase_ : int = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key]
UpperCAmelCase_ : Any = renew_vae_resnet_paths(snake_case_ )
UpperCAmelCase_ : List[str] = {"""old""": f"""mid.block_{i}""", """new""": f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(snake_case_, snake_case_, snake_case_, additional_replacements=[meta_path], config=snake_case_ )
UpperCAmelCase_ : Union[str, Any] = [key for key in vae_state_dict if """decoder.mid.attn""" in key]
UpperCAmelCase_ : Any = renew_vae_attention_paths(snake_case_ )
UpperCAmelCase_ : Optional[int] = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""}
assign_to_checkpoint(snake_case_, snake_case_, snake_case_, additional_replacements=[meta_path], config=snake_case_ )
conv_attn_to_linear(snake_case_ )
return new_checkpoint
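# Illustration of the key-renaming pattern above (a sketch): LDM VAE keys such as
# "encoder.down.0.block.1.conv1.weight" are mapped to diffusers keys like
# "encoder.down_blocks.0.resnets.1.conv1.weight" via the {"old": ..., "new": ...}
# meta paths handed to `assign_to_checkpoint`:
#
#     old_key = "encoder.down.0.block.1.conv1.weight"
#     new_key = old_key.replace("down.0.block", "down_blocks.0.resnets")
#     assert new_key == "encoder.down_blocks.0.resnets.1.conv1.weight"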
def vae_pt_to_vae_diffuser( checkpoint_path , output_path , ):
# Only supports SD v1 checkpoints; download the matching LDM config
r = requests.get(
"https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
io_obj = io.BytesIO(r.content )
original_config = OmegaConf.load(io_obj )
image_size = 512
device = "cuda" if torch.cuda.is_available() else "cpu"
if checkpoint_path.endswith("safetensors" ):
from safetensors import safe_open
checkpoint = {}
with safe_open(checkpoint_path , framework="pt" , device="cpu" ) as f:
for key in f.keys():
checkpoint[key] = f.get_tensor(key )
else:
checkpoint = torch.load(checkpoint_path , map_location=device )["state_dict"]
# Convert the VAE model.
vae_config = create_vae_diffusers_config(original_config , image_size=image_size )
converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint , vae_config )
vae = AutoencoderKL(**vae_config )
vae.load_state_dict(converted_vae_checkpoint )
vae.save_pretrained(output_path )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE .pt/.ckpt checkpoint to convert.')
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path where the converted diffusers VAE will be saved.')
_a = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 61 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
_snake_case = None
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
_snake_case = {
"vocab_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
},
"tokenizer_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
},
}
_snake_case = {
"google/fnet-base": 512,
"google/fnet-large": 512,
}
_snake_case = "▁"
class lowercase ( UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = ["input_ids", "token_type_ids"]
_a = FNetTokenizer
def __init__( self , _a=None , _a=None , _a=False , _a=True , _a=True , _a="<unk>" , _a="[SEP]" , _a="<pad>" , _a="[CLS]" , _a="[MASK]" , **_a , ) -> Optional[int]:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
_A : int = (
AddedToken(_a , lstrip=_a , rstrip=_a , normalized=_a )
if isinstance(_a , _a )
else mask_token
)
super().__init__(
_a , tokenizer_file=_a , do_lower_case=_a , remove_space=_a , keep_accents=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , **_a , )
_A : Optional[int] = do_lower_case
_A : List[Any] = remove_space
_A : str = keep_accents
_A : int = vocab_file
_A : int = False if not self.vocab_file else True
def a__ ( self , _a , _a = None ) -> List[int]:
_A : str = [self.sep_token_id]
_A : Dict = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def a__ ( self , _a , _a = None ) -> List[int]:
_A : Any = [self.sep_token_id]
_A : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
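# Layout produced by the two helpers above (sketch):
#   single sequence:   [CLS] A [SEP]           -> token_type_ids: 0 0 ... 0
#   pair of sequences: [CLS] A [SEP] B [SEP]   -> token_type_ids: 0 ... 0 1 ... 1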
def a__ ( self , _a , _a = None ) -> Tuple[str]:
if not os.path.isdir(_a ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A : List[str] = os.path.join(
_a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
return (out_vocab_file,)
| 26 | 0 |
'''simple docstring'''
def price_plus_tax( price : float , tax_rate : float ) -> float:
"""simple docstring"""
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(100, 0.25) = }''')
print(f'''{price_plus_tax(1_25.50, 0.05) = }''')
| 249 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_lowerCamelCase : Any = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : int = [
"VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMAEForPreTraining",
"ViTMAELayer",
"ViTMAEModel",
"ViTMAEPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
"TFViTMAEForPreTraining",
"TFViTMAEModel",
"TFViTMAEPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
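
# Note: `_LazyModule` replaces this package's module object in `sys.modules`, so the
# heavy torch/TF submodules listed in `_import_structure` are only imported on first
# attribute access; the TYPE_CHECKING branch keeps static type checkers working.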
| 249 | 1 |
'''simple docstring'''
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    """Return a Parquet row-group size adapted to the dataset's features, or None for the default."""
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)
    return None if batch_size is np.inf else batch_size
class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs, )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset


class ParquetDatasetWriter:
    def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, **parquet_writer_kwargs):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Write the dataset to file_obj in Parquet format and return the number of bytes written."""
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating parquet from Arrow format", ):
            batch = query_table(
                table=self.dataset._data, key=slice(offset, offset + batch_size), indices=self.dataset._indices if self.dataset._indices is not None else None, )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
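
# A minimal usage sketch for the reader/writer pair above (file paths are
# illustrative; assumes the `datasets` package this module belongs to):
#
#   ds = ParquetDatasetReader("data/train.parquet", split="train").read()
#   n_bytes = ParquetDatasetWriter(ds, "data/train_copy.parquet", batch_size=1000).write()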
| 139 |
'''simple docstring'''
from __future__ import annotations
Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
return None
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 139 | 1 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h


if is_torch_available():
    import torch
    import torch.nn as nn

    class LoRALayer(nn.Module):
        """Wraps a linear module with a small trainable low-rank adapter."""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False), nn.Linear(rank, module.out_features, bias=False), )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
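
# Sketch of the wrapping pattern used by the training test below (names are local to this file):
#
#   base = nn.Linear(16, 16)
#   wrapped = LoRALayer(base, rank=4)
#   out = wrapped(torch.randn(2, 16))  # frozen base output + low-rank adapter output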
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109_659_552_692_574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)


class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto")
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
    def tearDown(self):
        del self.model_fp16
        del self.model_4bit
        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config
        self.assertTrue(hasattr(config, "quantization_config"))
        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()
        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()
        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        quantization_config = BitsAndBytesConfig()
        quantization_config.load_in_4bit = True
        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=quantization_config, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        quantization_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name, quantization_config=quantization_config, load_in_4bit=True, device_map="auto", bnb_4bit_quant_type="nf4", )

    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")
        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)
        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))
        with self.assertRaises(ValueError):
            # Tries casting the dtype
            self.model_4bit.float()
        with self.assertRaises(ValueError):
            # Tries casting the dtype
            self.model_4bit.half()
        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")
        # Check this does not throw an error
        _ = self.model_fp16.half()
        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None
        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Bnb4BitModelClassesTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"
        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto")
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model
        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)
        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe
        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation", model=self.model_name, model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16}, max_new_tokens=self.MAX_NEW_TOKENS, )
        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced")
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})
        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return
        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)
        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})
        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)
        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)


class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3_191_854_854_152_187
| 362 |
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_saprl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]
    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False)
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
    args = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
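
# Example invocation (file paths are illustrative):
#   python convert_unispeech_sat.py \
#     --base_model_name microsoft/unispeech-sat-base \
#     --config_path ./config.json \
#     --checkpoint_path ./s3prl_checkpoint.pt \
#     --model_dump_path ./converted_model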
| 258 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_timesformer""": ["""TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TimesformerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
"""TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimesformerModel""",
"""TimesformerForVideoClassification""",
"""TimesformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 95 |
def topological_sort(graph: dict[int, list[int]]) -> None:
    """Kahn's algorithm: repeatedly remove zero in-degree vertices; detects cycles."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
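# Expected output for the graph above: [0, 1, 2, 3, 4, 5]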
| 345 | 0 |
"""simple docstring"""
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    """simple docstring"""

    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3], torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485]), ) )

    def test_exact_match_kwarg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3], torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485]), ) )
| 181 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the launcher's own arguments plus everything destined for the training script."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
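
# Example invocation (script name and arguments are illustrative):
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...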
| 181 | 1 |
def simplify(current_set: list[list]) -> list[list]:
    """Normalize rows by their leading term, cancel the first column, and recurse."""
    # Divide each row by the magnitude of its first term
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        saved_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, saved_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    """Solve n simultaneous linear equations given as n lists of length n + 1."""
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
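    # Expected output (computed by hand): [-1.0, 0.0, 1.0, 2.0, 3.0] for the 5x5 system
    # above (each x_i equals its right-hand side minus 5), and [0.5] for [[4, 2]].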
| 146 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy Beam-based dataset with a single string feature."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}), supervised_keys=None, )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy Beam-based dataset with a nested sequence feature."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}), supervised_keys=None, )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")))
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow")))
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow")))
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")))
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset
| 146 | 1 |
"""simple docstring"""
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph: list[list[int]], s: int, t: int, parent: list[int]) -> bool:
    # Return True if there is an augmenting path from s to t in the residual graph.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph: list[list[int]], source: int, sink: int) -> list[tuple[int, int]]:
    """Run Edmonds-Karp max flow, then return the original edges that end up saturated."""
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities (copy).
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
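    # This is the classic CLRS flow network; the maximum flow from 0 to 5 is 23, and
    # the printed pairs are the edges left saturated once the algorithm terminates.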
| 355 |
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start)
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples, tokenizer, max_length=args.max_seq_length, label_list=label_list, output_mode=self.output_mode, )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]")

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
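
# A minimal usage sketch (assumes GLUE data files are on disk; paths are illustrative):
#   from transformers import AutoTokenizer
#   args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   train_dataset = GlueDataset(args, tokenizer=tokenizer, mode="train")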
| 68 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    """simple docstring"""

    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        """simple docstring"""
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # The dataclass is frozen, so set the attribute through __dict__.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """simple docstring"""
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
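
# A minimal usage sketch (assumes the `datasets` library this module belongs to):
#   feats = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#   task = TextClassification()
#   aligned = task.align_with_features(feats)  # label_schema now carries the real ClassLabel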
| 55 |
"""simple docstring"""
def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - 97] = True
        elif char.isupper():
            flag[ord(char) - 65] = True
    return all(flag)


def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 45 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(self, vocab_size=50_000, embedding_size=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1536, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, rotary_value=False, use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
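
# A minimal usage sketch:
#   config = RoFormerConfig(vocab_size=1000, hidden_size=128, num_hidden_layers=2, num_attention_heads=2)
#   assert config.embedding_size == 128  # falls back to hidden_size when embedding_size is None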
| 154 |
"""simple docstring"""
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Return the set of distinct prime factors of n."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of distinct prime factors."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """True if all elements are equal (or the list is empty)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list[int]:
    """Find the first n consecutive integers that each have exactly n distinct prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4):
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
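    # With the default n=4 this is Project Euler problem 47; the expected result is 134043.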
| 154 | 1 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2
class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
| 73 |
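For context on what the interpreter tests above exercise: evaluate walks the AST of a small Python snippet, executes a whitelisted subset of nodes against a state dict, and refuses calls to tools that were not passed in. The sketch below is a minimal illustration of that technique, not the transformers implementation; the node coverage and the error message are assumptions.

import ast

def mini_evaluate(code, tools, state):
    """Toy AST walker: handles assignments, names, constants and whitelisted calls."""
    result = None
    for node in ast.parse(code).body:
        if isinstance(node, ast.Assign):
            value = _eval_node(node.value, tools, state)
            state[node.targets[0].id] = value
            result = value
        elif isinstance(node, ast.Expr):
            result = _eval_node(node.value, tools, state)
    return result

def _eval_node(node, tools, state):
    if isinstance(node, ast.Constant):
        return node.value
    if isinstance(node, ast.Name):
        return state[node.id]
    if isinstance(node, ast.Call):
        func_name = node.func.id
        if func_name not in tools:
            # Mirrors the behaviour the test checks for: complain instead of executing.
            print(f"tried to execute {func_name} without it being a tool")
            return None
        args = [_eval_node(arg, tools, state) for arg in node.args]
        return tools[func_name](*args)
    raise ValueError(f"unsupported node: {type(node).__name__}")

state = {"x": 3}
assert mini_evaluate("y = add_two(x)", {"add_two": lambda x: x + 2}, state) == 5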
from __future__ import annotations

from scipy.special import comb  # type: ignore


class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
| 73 | 1 |
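A quick numerical check of the Bernstein basis used above: for a degree-2 curve at t = 0.5 the basis is [0.25, 0.5, 0.25], so the curve point for control points (0,0), (5,5), (5,0) is (0.25*0 + 0.5*5 + 0.25*5, 0.25*0 + 0.5*5 + 0.25*0) = (3.75, 2.5). Assuming the BezierCurve class above is in scope:

curve = BezierCurve([(0, 0), (5, 5), (5, 0)])
assert curve.basis_function(0.5) == [0.25, 0.5, 0.25]
assert curve.bezier_curve_function(0.5) == (3.75, 2.5)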
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_xlm_roberta": [
"XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaConfig",
"XLMRobertaOnnxConfig",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
"XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaForCausalLM",
"XLMRobertaForMaskedLM",
"XLMRobertaForMultipleChoice",
"XLMRobertaForQuestionAnswering",
"XLMRobertaForSequenceClassification",
"XLMRobertaForTokenClassification",
"XLMRobertaModel",
"XLMRobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
"TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMRobertaForCausalLM",
"TFXLMRobertaForMaskedLM",
"TFXLMRobertaForMultipleChoice",
"TFXLMRobertaForQuestionAnswering",
"TFXLMRobertaForSequenceClassification",
"TFXLMRobertaForTokenClassification",
"TFXLMRobertaModel",
"TFXLMRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
"FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxXLMRobertaForMaskedLM",
"FlaxXLMRobertaForCausalLM",
"FlaxXLMRobertaForMultipleChoice",
"FlaxXLMRobertaForQuestionAnswering",
"FlaxXLMRobertaForSequenceClassification",
"FlaxXLMRobertaForTokenClassification",
"FlaxXLMRobertaModel",
"FlaxXLMRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 163 |
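The sys.modules[__name__] = _LazyModule(...) swap above is what defers the heavy framework imports until an attribute is first accessed. A minimal stand-alone version of the same idea uses module-level __getattr__ (PEP 562); the module and attribute names here are placeholders, not the transformers internals:

# lazy_pkg/__init__.py  (illustrative only)
import importlib

_import_structure = {"tokenization": ["MyTokenizer"], "modeling": ["MyModel"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    # Import the submodule only when one of its symbols is actually requested.
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")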
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_xlm_roberta": [
"XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaConfig",
"XLMRobertaOnnxConfig",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
"XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaForCausalLM",
"XLMRobertaForMaskedLM",
"XLMRobertaForMultipleChoice",
"XLMRobertaForQuestionAnswering",
"XLMRobertaForSequenceClassification",
"XLMRobertaForTokenClassification",
"XLMRobertaModel",
"XLMRobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
"TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMRobertaForCausalLM",
"TFXLMRobertaForMaskedLM",
"TFXLMRobertaForMultipleChoice",
"TFXLMRobertaForQuestionAnswering",
"TFXLMRobertaForSequenceClassification",
"TFXLMRobertaForTokenClassification",
"TFXLMRobertaModel",
"TFXLMRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
"FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxXLMRobertaForMaskedLM",
"FlaxXLMRobertaForCausalLM",
"FlaxXLMRobertaForMultipleChoice",
"FlaxXLMRobertaForQuestionAnswering",
"FlaxXLMRobertaForSequenceClassification",
"FlaxXLMRobertaForTokenClassification",
"FlaxXLMRobertaModel",
"FlaxXLMRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 163 | 1 |
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment


class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 336 |
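What the script above verifies, in single-process terms: pad_across_processes zero-pads each rank's tensor along dim 0 (at the end, or at the front with pad_first=True) so every rank ends up with the largest first dimension seen across processes. A local sketch of just the padding rule, with names of my own choosing:

import torch

def pad_to_length(tensor, max_len, pad_first=False):
    pad_rows = max_len - tensor.shape[0]
    padding = torch.zeros(pad_rows, *tensor.shape[1:], dtype=tensor.dtype)
    return torch.cat([padding, tensor] if pad_first else [tensor, padding], dim=0)

t = torch.ones(2, 10)
padded = pad_to_length(t, 4)
assert padded.shape == (4, 10)
assert torch.equal(padded[:2], t) and torch.all(padded[2:] == 0)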
"""simple docstring"""
# Logistic Regression from scratch

# In[62]:

# In[63]:

# importing all the required libraries

import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
| 194 | 0 |
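The update in logistic_reg uses the standard binary cross-entropy gradient x.T @ (h - y) / m, where h = sigmoid(x @ theta) and m is the sample count. A quick finite-difference check of that formula, self-contained and independent of the snippet above:

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(20, 3))
y = (rng.random(20) > 0.5).astype(float)
theta = rng.normal(size=3)

def cost(t):
    h = 1 / (1 + np.exp(-x @ t))
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()

# Analytic gradient of the mean BCE loss.
analytic = x.T @ (1 / (1 + np.exp(-x @ theta)) - y) / y.size

# Central-difference approximation, one coordinate at a time.
eps = 1e-6
numeric = np.array(
    [(cost(theta + eps * np.eye(3)[i]) - cost(theta - eps * np.eye(3)[i])) / (2 * eps) for i in range(3)]
)
assert np.allclose(analytic, numeric, atol=1e-6)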
import unittest

import numpy as np
import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_karras_ve_pipeline(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 360 |
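The fast test above reseeds with torch.manual_seed(0) before each pipeline call so that the dict-style and tuple-style outputs are produced from identical noise. The determinism pattern in isolation:

import torch

gen = torch.manual_seed(0)
first = torch.randn(4, generator=gen)
gen = torch.manual_seed(0)  # reset the global generator to the same state
second = torch.randn(4, generator=gen)
assert torch.equal(first, second)  # same seed, same draws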
from argparse import ArgumentParser

from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info


def parse_unknown_args(unknown_args):
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()
| 34 | 0 |
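Each register_subcommand call above attaches a subparser plus a factory via set_defaults(func=...), which is why main can dispatch on args.func. A minimal self-contained version of the same argparse pattern (names are mine):

from argparse import ArgumentParser

class HelloCommand:
    @staticmethod
    def register_subcommand(subparsers):
        parser = subparsers.add_parser("hello")
        parser.add_argument("--name", default="world")
        parser.set_defaults(func=lambda args: print(f"hello {args.name}"))

parser = ArgumentParser("demo-cli", usage="demo-cli <command> [<args>]")
HelloCommand.register_subcommand(parser.add_subparsers())
args = parser.parse_args(["hello", "--name", "datasets"])
args.func(args)  # prints: hello datasets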
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 78 |
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """simple docstring"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 78 | 1 |
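Typical usage of the pipeline above goes through the pipeline factory. The checkpoint name below is only an example of a VQA-capable model, and the printed answer is illustrative:

from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
outputs = vqa(
    image="http://images.cocodataset.org/val2017/000000039769.jpg",
    question="How many cats are there?",
)
# outputs is a list of {"score": float, "answer": str}, best answer first.
print(outputs[0]["answer"])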
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name_or_path: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )

    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 352 |
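The script above leans on accelerator.save_state/load_state for epoch-level resumption. The underlying idea, stripped of Accelerate, is just serializing the model, optimizer, and bookkeeping together; a plain-PyTorch sketch under my own naming:

import torch

def save_checkpoint(path, model, optimizer, epoch):
    torch.save(
        {"model": model.state_dict(), "optimizer": optimizer.state_dict(), "epoch": epoch},
        path,
    )

def load_checkpoint(path, model, optimizer):
    ckpt = torch.load(path, map_location="cpu")
    model.load_state_dict(ckpt["model"])
    optimizer.load_state_dict(ckpt["optimizer"])
    return ckpt["epoch"] + 1  # epoch to resume from

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
save_checkpoint("epoch_0.pt", model, optimizer, epoch=0)
assert load_checkpoint("epoch_0.pt", model, optimizer) == 1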
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1),
                size=(image.size[1], image.size[0]),
                mode="bicubic",
                align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )

        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 123 | 0 |
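The read_in_q_k_v helper above is an instance of a common conversion chore: timm-style checkpoints store one fused qkv projection of shape (3 * hidden, hidden), while the HF layout splits it into three matrices. The slicing in isolation:

import torch

hidden_size = 8
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)

query = in_proj_weight[:hidden_size, :]
key = in_proj_weight[hidden_size : 2 * hidden_size, :]
value = in_proj_weight[-hidden_size:, :]

# Stacking the three slices back recovers the fused projection exactly.
assert torch.equal(torch.cat([query, key, value]), in_proj_weight)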
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xlm_roberta_xl': [
'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaXLConfig',
'XLMRobertaXLOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaXLForCausalLM',
'XLMRobertaXLForMaskedLM',
'XLMRobertaXLForMultipleChoice',
'XLMRobertaXLForQuestionAnswering',
'XLMRobertaXLForSequenceClassification',
'XLMRobertaXLForTokenClassification',
'XLMRobertaXLModel',
'XLMRobertaXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 207 |
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    """simple docstring"""

    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config

        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")

        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")

        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())

        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        """simple docstring"""
        pass

    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
| 207 | 1 |
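The wrapper above is a thin adapter over timm's feature-extraction mode. Using timm directly shows what features_only and out_indices control; the model name is only an example:

import timm
import torch

backbone = timm.create_model("resnet18", pretrained=False, features_only=True, out_indices=(1, 2, 3))
feature_maps = backbone(torch.randn(1, 3, 224, 224))
# One tensor per requested stage, with increasing downsampling factors:
for reduction, fmap in zip(backbone.feature_info.reduction(), feature_maps):
    print(reduction, tuple(fmap.shape))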
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = 'src/transformers'

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_re_flax_models = re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')

# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
    ('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def camel_case_split(identifier):
    matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)
    return [m.group(0) for m in matches]
def get_frameworks_table():
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace('Config', ''): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = ''.join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {'model_type': all_models}
    data['pytorch'] = [pt_models[t] for t in all_models]
    data['tensorflow'] = [tf_models[t] for t in all_models]
    data['flax'] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = 'AutoProcessor'
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = 'AutoTokenizer'
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = 'AutoFeatureExtractor'
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = 'AutoTokenizer'

    data['processor'] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
        auto_classes = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})

    return table
def lowerCamelCase__ ( a , a ) -> List[Any]:
_A: Optional[int] = get_frameworks_table()
_A: List[Any] = Dataset.from_pandas(a )
_A: List[Any] = hf_hub_download(
'''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=a )
_A: Any = Dataset.from_json(a )
_A: Tuple = {
tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class'''])
for i in range(len(a ) )
}
_A: str = update_pipeline_and_auto_class_table(a )
    # Sort the model classes so that nondeterministic ordering does not create spurious update commits.
_A: str = sorted(table.keys() )
_A: List[str] = pd.DataFrame(
{
'''model_class''': model_classes,
'''pipeline_tag''': [table[m][0] for m in model_classes],
'''auto_class''': [table[m][1] for m in model_classes],
} )
_A: Dict = Dataset.from_pandas(a )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(a , '''frameworks.json''' ) )
tags_dataset.to_json(os.path.join(a , '''pipeline_tags.json''' ) )
if commit_sha is not None:
_A: Union[str, Any] = (
f"""Update with commit {commit_sha}\n\nSee: """
f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
_A: str = '''Update'''
upload_folder(
repo_id='''huggingface/transformers-metadata''' , folder_path=a , repo_type='''dataset''' , token=a , commit_message=a , )
def lowerCamelCase__ ( ) -> List[Any]:
_A: Optional[int] = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
_A: Union[str, Any] = transformers_module.pipelines.SUPPORTED_TASKS
_A: int = []
for key in pipeline_tasks:
if key not in in_table:
_A: Dict = pipeline_tasks[key]['''pt''']
if isinstance(a , (list, tuple) ):
_A: Optional[Any] = model[0]
_A: Optional[int] = model.__name__
if model not in in_table.values():
missing.append(a )
if len(a ) > 0:
_A: Optional[Any] = ''', '''.join(a )
raise ValueError(
'''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
f"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
UpperCAmelCase__ : List[Any] = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
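# A minimal standalone sketch of the camel-case splitting used by the script
# above: the regex matches lazily up to a lower->UPPER boundary, the last
# capital of an acronym run, or end of string. Example identifiers are
# illustrative only.
import re

_CAMEL_RE = re.compile(r".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)")

def split_camel_case(identifier):
    return [m.group(0) for m in _CAMEL_RE.finditer(identifier)]

assert split_camel_case("TFBertModel") == ["TF", "Bert", "Model"]
assert split_camel_case("GPT2LMHeadModel") == ["GPT2LM", "Head", "Model"]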
| 361 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
UpperCAmelCase__ : Union[str, Any] = {
'google/mobilenet_v1_1.0_224': 'https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json',
'google/mobilenet_v1_0.75_192': 'https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = '''mobilenet_v1'''
def __init__( self : Optional[int] , lowerCAmelCase_ : Any=3 , lowerCAmelCase_ : str=2_2_4 , lowerCAmelCase_ : List[str]=1.0 , lowerCAmelCase_ : Any=8 , lowerCAmelCase_ : Tuple="relu6" , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Optional[int]=0.999 , lowerCAmelCase_ : List[str]=0.02 , lowerCAmelCase_ : List[Any]=0.001 , **lowerCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
if depth_multiplier <= 0:
raise ValueError('''depth_multiplier must be greater than zero.''' )
_A: Any = num_channels
_A: Optional[int] = image_size
_A: Optional[Any] = depth_multiplier
_A: Tuple = min_depth
_A: Any = hidden_act
_A: Dict = tf_padding
_A: List[Any] = classifier_dropout_prob
_A: Tuple = initializer_range
_A: Tuple = layer_norm_eps
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : Dict = version.parse('''1.11''' )
@property
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
return OrderedDict([('''pixel_values''', {0: '''batch'''})] )
@property
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([('''logits''', {0: '''batch'''})] )
else:
return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] )
@property
def __magic_name__ ( self : Dict ):
"""simple docstring"""
return 1e-4
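# The OnnxConfig above only declares which axes are dynamic (the batch axis).
# A hedged sketch of how such a mapping is consumed by a plain
# torch.onnx.export call; the toy model and file name are placeholders, not
# the real MobileNetV1 export path.
import torch

toy_model = torch.nn.Sequential(
    torch.nn.Conv2d(3, 8, kernel_size=3),
    torch.nn.AdaptiveAvgPool2d(1),
    torch.nn.Flatten(),
    torch.nn.Linear(8, 10),
)
toy_model.eval()

torch.onnx.export(
    toy_model,
    torch.randn(1, 3, 224, 224),
    "classifier.onnx",
    input_names=["pixel_values"],
    output_names=["logits"],
    # mirrors the config's inputs/outputs mapping: axis 0 is named "batch"
    dynamic_axes={"pixel_values": {0: "batch"}, "logits": {0: "batch"}},
    opset_version=13,
)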
| 301 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
def lowercase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False ) -> Dict:
_snake_case : Union[str, Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_snake_case : Dict = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def lowercase ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any]=False ) -> Optional[Any]:
for i in range(config.num_hidden_layers ):
if base_model:
_snake_case : Optional[int] = ''
else:
_snake_case : str = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_snake_case : Union[str, Any] = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
_snake_case : int = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_snake_case : Dict = in_proj_weight[
: config.hidden_size, :
]
_snake_case : str = in_proj_bias[: config.hidden_size]
_snake_case : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_snake_case : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_snake_case : Tuple = in_proj_weight[
-config.hidden_size :, :
]
_snake_case : Optional[int] = in_proj_bias[-config.hidden_size :]
def lowercase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[Any]:
_snake_case : Dict = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(__snake_case , __snake_case )
def lowercase ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple ) -> str:
_snake_case : Dict = dct.pop(__snake_case )
_snake_case : Any = val
def lowercase ( ) -> str:
_snake_case : str = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_snake_case : Dict = Image.open(requests.get(__snake_case , stream=__snake_case ).raw )
return im
@torch.no_grad()
def lowercase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any]=True ) -> List[str]:
_snake_case : List[str] = ViTConfig()
# patch_size
if model_name[-1] == "8":
_snake_case : Optional[int] = 8
# set labels if required
if not base_model:
_snake_case : Optional[Any] = 1_000
_snake_case : Union[str, Any] = 'huggingface/label-files'
_snake_case : Union[str, Any] = 'imagenet-1k-id2label.json'
_snake_case : List[Any] = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type="""dataset""" ) , """r""" ) )
_snake_case : int = {int(__snake_case ): v for k, v in idalabel.items()}
_snake_case : Optional[int] = idalabel
_snake_case : Dict = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
_snake_case : List[Any] = 384
_snake_case : str = 1_536
_snake_case : Optional[Any] = 12
_snake_case : List[str] = 6
# load original model from torch hub
_snake_case : List[Any] = torch.hub.load("""facebookresearch/dino:main""" , __snake_case )
original_model.eval()
# load state_dict of original model, remove and rename some keys
_snake_case : Tuple = original_model.state_dict()
if base_model:
remove_classification_head_(__snake_case )
_snake_case : Any = create_rename_keys(__snake_case , base_model=__snake_case )
for src, dest in rename_keys:
rename_key(__snake_case , __snake_case , __snake_case )
read_in_q_k_v(__snake_case , __snake_case , __snake_case )
# load HuggingFace model
if base_model:
_snake_case : Optional[int] = ViTModel(__snake_case , add_pooling_layer=__snake_case ).eval()
else:
_snake_case : Any = ViTForImageClassification(__snake_case ).eval()
model.load_state_dict(__snake_case )
# Check outputs on an image, prepared by ViTImageProcessor
_snake_case : int = ViTImageProcessor()
_snake_case : str = image_processor(images=prepare_img() , return_tensors="""pt""" )
_snake_case : Any = encoding['pixel_values']
_snake_case : Dict = model(__snake_case )
if base_model:
_snake_case : str = original_model(__snake_case )
assert torch.allclose(__snake_case , outputs.last_hidden_state[:, 0, :] , atol=1e-1 )
else:
_snake_case : Dict = original_model(__snake_case )
assert logits.shape == outputs.logits.shape
assert torch.allclose(__snake_case , outputs.logits , atol=1e-3 )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__snake_case )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__snake_case )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""dino_vitb16""",
type=str,
help="""Name of the model trained with DINO you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--base_model""",
action="""store_true""",
help="""Whether to only convert the base model (no projection head weights).""",
)
parser.set_defaults(base_model=True)
a__ = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
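# Standalone sketch of the fused-QKV slicing performed by read_in_q_k_v above:
# timm stores query/key/value as one (3*hidden, hidden) matrix, which is cut
# into three equal row blocks. hidden_size here is illustrative only.
import torch

hidden_size = 4
in_proj_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32)
in_proj_weight = in_proj_weight.reshape(3 * hidden_size, hidden_size)
in_proj_bias = torch.arange(3 * hidden_size, dtype=torch.float32)

q_w = in_proj_weight[:hidden_size, :]
k_w = in_proj_weight[hidden_size : 2 * hidden_size, :]
v_w = in_proj_weight[-hidden_size:, :]
q_b, k_b = in_proj_bias[:hidden_size], in_proj_bias[hidden_size : 2 * hidden_size]
v_b = in_proj_bias[-hidden_size:]

# the three slices tile the fused tensors exactly
assert torch.equal(torch.cat([q_w, k_w, v_w]), in_proj_weight)
assert torch.equal(torch.cat([q_b, k_b, v_b]), in_proj_bias)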
| 317 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ : int = logging.get_logger(__name__)
lowercase__ : int = {
'''andreasmadsen/efficient_mlm_m0.40''': (
'''https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''roberta-prelayernorm'''
def __init__( self , _UpperCAmelCase=5_0265 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=2 , _UpperCAmelCase="absolute" , _UpperCAmelCase=True , _UpperCAmelCase=None , **_UpperCAmelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase)
__A : Optional[int] = vocab_size
__A : List[Any] = hidden_size
__A : Optional[int] = num_hidden_layers
__A : Optional[Any] = num_attention_heads
__A : List[str] = hidden_act
__A : Dict = intermediate_size
__A : Optional[int] = hidden_dropout_prob
__A : Optional[Any] = attention_probs_dropout_prob
__A : Tuple = max_position_embeddings
__A : Union[str, Any] = type_vocab_size
__A : Any = initializer_range
__A : str = layer_norm_eps
__A : int = position_embedding_type
__A : Optional[Any] = use_cache
__A : Any = classifier_dropout
class SCREAMING_SNAKE_CASE (a__ ):
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
if self.task == "multiple-choice":
__A : Dict = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__A : Union[str, Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
            ])
| 190 | 0 |
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
A_ :Any = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
A_ :Dict = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
A_ :str = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
A_ :Dict = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
A_ :List[str] = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
A_ :int = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
A_ :Tuple = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
A_ :Tuple = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
A_ :Optional[int] = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
A_ :Dict = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
A_ :Optional[int] = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
A_ :Optional[Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
A_ :List[Any] = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
A_ :Dict = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
A_ :Union[str, Any] = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
A_ :Any = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
A_ :Optional[Any] = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
A_ :Tuple = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
A_ :Union[str, Any] = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
A_ :Optional[Any] = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
A_ :Dict = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
A_ :Union[str, Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
A_ :Any = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
A_ :str = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
A_ :str = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
A_ :str = ''''''
A_ :Optional[int] = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
A_ :Dict = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
A_ :List[str] = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
'readme_md, expected_dict' ,[
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] ,)
def A ( a_ ,a_ ) -> List[str]:
assert ReadMe.from_string(a_ ,a_ ).to_dict() == expected_dict
@pytest.mark.parametrize(
'readme_md, expected_error' ,[
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] ,)
def A ( a_ ,a_ ) -> int:
with pytest.raises(a_ ,match=re.escape(expected_error.format(path='root' ) ) ):
__UpperCamelCase : List[Any] =ReadMe.from_string(a_ ,a_ )
readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' ,[
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] ,)
def A ( a_ ,a_ ) -> Union[str, Any]:
with pytest.raises(a_ ,match=re.escape(expected_error.format(path='root' ) ) ):
ReadMe.from_string(a_ ,a_ )
@pytest.mark.parametrize(
'readme_md,' ,[
(README_MULTIPLE_SAME_HEADING_1),
] ,)
def A ( a_ ) -> Tuple:
ReadMe.from_string(a_ ,a_ ,suppress_parsing_errors=a_ )
@pytest.mark.parametrize(
'readme_md, expected_dict' ,[
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] ,)
def A ( a_ ,a_ ) -> List[Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCamelCase : Dict =Path(a_ ) / 'README.md'
with open(a_ ,'w+' ) as readme_file:
readme_file.write(a_ )
__UpperCamelCase : Optional[Any] =ReadMe.from_readme(a_ ,a_ ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'readme_md, expected_error' ,[
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] ,)
def A ( a_ ,a_ ) -> List[str]:
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCamelCase : Any =Path(a_ ) / 'README.md'
with open(a_ ,'w+' ) as readme_file:
readme_file.write(a_ )
__UpperCamelCase : Optional[int] =expected_error.format(path=a_ )
with pytest.raises(a_ ,match=re.escape(a_ ) ):
__UpperCamelCase : List[str] =ReadMe.from_readme(a_ ,a_ )
readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' ,[
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] ,)
def A ( a_ ,a_ ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCamelCase : Optional[Any] =Path(a_ ) / 'README.md'
with open(a_ ,'w+' ) as readme_file:
readme_file.write(a_ )
__UpperCamelCase : Optional[int] =expected_error.format(path=a_ )
with pytest.raises(a_ ,match=re.escape(a_ ) ):
ReadMe.from_readme(a_ ,a_ )
@pytest.mark.parametrize(
'readme_md,' ,[
(README_MULTIPLE_SAME_HEADING_1),
] ,)
def A ( a_ ) -> Any:
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCamelCase : Union[str, Any] =Path(a_ ) / 'README.md'
with open(a_ ,'w+' ) as readme_file:
readme_file.write(a_ )
ReadMe.from_readme(a_ ,a_ ,suppress_parsing_errors=a_ )
| 245 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def A ( a_ = 3 ) -> qiskit.result.counts.Counts:
if isinstance(a_ ,a_ ):
        raise TypeError('number of qubits must be an integer.' )
if number_of_qubits <= 0:
raise ValueError('number of qubits must be > 0.' )
if math.floor(a_ ) != number_of_qubits:
raise ValueError('number of qubits must be exact integer.' )
if number_of_qubits > 10:
        raise ValueError('number of qubits too large to simulate (>10).' )
__UpperCamelCase : str =QuantumRegister(a_ ,'qr' )
__UpperCamelCase : Optional[int] =ClassicalRegister(a_ ,'cr' )
__UpperCamelCase : Optional[Any] =QuantumCircuit(a_ ,a_ )
__UpperCamelCase : Any =number_of_qubits
for i in range(a_ ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(a_ ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) ,a_ ,a_ )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(a_ ,number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(a_ ,a_ )
# simulate with 10000 shots
__UpperCamelCase : Any =Aer.get_backend('qasm_simulator' )
__UpperCamelCase : Tuple =execute(a_ ,a_ ,shots=10_000 )
return job.result().get_counts(a_ )
if __name__ == "__main__":
print(
f"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
)
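# Classical cross-check of the circuit above (numpy only, no Qiskit): the QFT
# unitary is F[j, k] = omega**(j*k) / sqrt(N) with omega = exp(2*pi*i/N).
# Applied to |0>, it yields the uniform superposition, which is why the
# simulation above returns near-equal counts over all basis states.
import numpy as np

def qft_matrix(num_qubits):
    n = 2**num_qubits
    omega = np.exp(2j * np.pi / n)
    j, k = np.meshgrid(np.arange(n), np.arange(n), indexing="ij")
    return omega ** (j * k) / np.sqrt(n)

f = qft_matrix(3)
assert np.allclose(f @ f.conj().T, np.eye(8))                  # unitarity
assert np.allclose(f @ np.eye(8)[0], np.ones(8) / np.sqrt(8))  # QFT|0> is uniform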
| 245 | 1 |
"""simple docstring"""
from __future__ import annotations
def __UpperCAmelCase ( __UpperCamelCase ):
if len(__UpperCamelCase ) == 0:
return array
__lowercase ,__lowercase : Any = min(__UpperCamelCase ), max(__UpperCamelCase )
    # Compute the number of holes needed: one per value in [min, max]
__lowercase : Optional[Any] = _max - _min + 1
__lowercase ,__lowercase : Optional[int] = [0] * holes_range, [0] * holes_range
    # Count each value into its hole.
for i in array:
__lowercase : int = i - _min
__lowercase : Optional[Any] = i
holes_repeat[index] += 1
    # Rebuild the array in sorted order from the holes.
__lowercase : Any = 0
for i in range(__UpperCamelCase ):
while holes_repeat[i] > 0:
__lowercase : Optional[Any] = holes[i]
index += 1
holes_repeat[i] -= 1
# Returns the sorted array.
return array
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ = input('Enter numbers separated by comma:\n')
a_ = [int(x) for x in user_input.split(',')]
print(pigeon_sort(unsorted))
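# Pigeonhole sort is O(n + range) time and O(range) extra space, so it only
# pays off when max - min is comparable to len(array). A counting-style
# reformulation of the same idea, property-tested against sorted() on random
# inputs (test parameters are arbitrary):
import random

def pigeonhole_sorted(values):
    if not values:
        return []
    lo, hi = min(values), max(values)
    holes = [0] * (hi - lo + 1)  # one counter per possible value
    for v in values:
        holes[v - lo] += 1
    out = []
    for offset, count in enumerate(holes):
        out.extend([lo + offset] * count)
    return out

for _ in range(100):
    data = [random.randint(-50, 50) for _ in range(random.randint(0, 40))]
    assert pigeonhole_sorted(data) == sorted(data)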
| 249 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
a_ = '\nHuman: <<task>>\n\nAssistant: '
a_ = 'huggingface-tools/default-prompts'
a_ = {'chat': 'chat_prompt_template.txt', 'run': 'run_prompt_template.txt'}
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase="run" ):
if prompt_or_repo_id is None:
__lowercase : Optional[int] = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search('''\\s''' , __UpperCamelCase ) is not None:
return prompt_or_repo_id
__lowercase : List[Any] = cached_file(
__UpperCamelCase , PROMPT_FILES[mode] , repo_type='''dataset''' , user_agent={'''agent''': agent_name} )
with open(__UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f:
return f.read()
| 249 | 1 |
"""simple docstring"""
from __future__ import annotations
lowercase__ = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) ->tuple[list[list[int]], list[list[int]]]:
a__: Optional[int] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_SCREAMING_SNAKE_CASE ) )
] # the reference grid
a__: Any = 1
a__: Union[str, Any] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_SCREAMING_SNAKE_CASE ) )
] # the action grid
a__: List[str] = init[0]
a__: List[Any] = init[1]
a__: Tuple = 0
a__: Dict = g + heuristic[x][y] # cost from starting cell to destination cell
a__: int = [[f, g, x, y]]
a__: str = False # flag that is set when search is complete
    a__: List[str] = False  # flag set if we can't expand any further
while not found and not resign:
if len(_SCREAMING_SNAKE_CASE ) == 0:
raise ValueError('Algorithm is unable to find solution' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
a__: Optional[Any] = cell.pop()
a__: Dict = next_cell[2]
a__: Union[str, Any] = next_cell[3]
a__: Tuple = next_cell[1]
if x == goal[0] and y == goal[1]:
a__: int = True
else:
for i in range(len(_SCREAMING_SNAKE_CASE ) ): # to try out different valid actions
a__: int = x + DIRECTIONS[i][0]
a__: Optional[Any] = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(_SCREAMING_SNAKE_CASE ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
a__: List[str] = g + cost
a__: List[Any] = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
a__: Optional[int] = 1
a__: int = i
a__: Optional[int] = []
a__: str = goal[0]
a__: Union[str, Any] = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
a__: Optional[int] = x - DIRECTIONS[action[x][y]][0]
a__: Tuple = y - DIRECTIONS[action[x][y]][1]
a__: Union[str, Any] = xa
a__: Tuple = ya
invpath.append([x, y] )
a__: Optional[Any] = []
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
path.append(invpath[len(_SCREAMING_SNAKE_CASE ) - 1 - i] )
return path, action
if __name__ == "__main__":
lowercase__ = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
lowercase__ = [0, 0]
# all coordinates are given in format [y,x]
lowercase__ = [len(grid) - 1, len(grid[0]) - 1]
lowercase__ = 1
# the cost map which pushes the path closer to the goal
lowercase__ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
lowercase__ = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
lowercase__ = 99
lowercase__ , lowercase__ = search(grid, init, goal, cost, heuristic)
print('ACTION MAP')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
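# The open list above is fully re-sorted on every iteration. A common
# refinement, sketched here under the same grid conventions (0 = free,
# 1 = obstacle), keeps the frontier in a binary heap so each pop is
# O(log k). This is an illustrative alternative, not the snippet's code.
import heapq

def astar_cost(grid, init, goal, cost, heuristic):
    """Return the cheapest path cost from init to goal, or None."""
    frontier = [(heuristic[init[0]][init[1]], 0, init[0], init[1])]
    closed = set()
    while frontier:
        _f, g, x, y = heapq.heappop(frontier)  # smallest f = g + h first
        if (x, y) in closed:
            continue
        if [x, y] == goal:
            return g
        closed.add((x, y))
        for dx, dy in [(-1, 0), (0, -1), (1, 0), (0, 1)]:
            nx, ny = x + dx, y + dy
            if 0 <= nx < len(grid) and 0 <= ny < len(grid[0]) and grid[nx][ny] == 0:
                heapq.heappush(frontier, (g + cost + heuristic[nx][ny], g + cost, nx, ny))
    return None

assert astar_cost([[0, 0], [0, 0]], [0, 0], [1, 1], 1, [[2, 1], [1, 0]]) == 2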
| 203 |
"""simple docstring"""
def __a ( _SCREAMING_SNAKE_CASE ) ->bool:
return credit_card_number.startswith(('34', '35', '37', '4', '5', '6') )
def __a ( _SCREAMING_SNAKE_CASE ) ->bool:
a__: Any = credit_card_number
a__: Tuple = 0
a__: List[str] = len(_SCREAMING_SNAKE_CASE ) - 2
for i in range(_SCREAMING_SNAKE_CASE , -1 , -2 ):
# double the value of every second digit
a__: Tuple = int(cc_number[i] )
digit *= 2
# If doubling of a number results in a two digit number
# i.e greater than 9(e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 10
digit += 1
a__: Optional[Any] = cc_number[:i] + str(_SCREAMING_SNAKE_CASE ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(_SCREAMING_SNAKE_CASE ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def __a ( _SCREAMING_SNAKE_CASE ) ->bool:
a__: Optional[int] = F'{credit_card_number} is an invalid credit card number because'
if not credit_card_number.isdigit():
print(F'{error_message} it has nonnumerical characters.' )
return False
if not 13 <= len(_SCREAMING_SNAKE_CASE ) <= 16:
print(F'{error_message} of its length.' )
return False
if not validate_initial_digits(_SCREAMING_SNAKE_CASE ):
print(F'{error_message} of its first two digits.' )
return False
if not luhn_validation(_SCREAMING_SNAKE_CASE ):
print(F'{error_message} it fails the Luhn check.' )
return False
print(F'{credit_card_number} is a valid credit card number.' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323')
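# Worked example of the checksum above for the test number 4111111111111111:
# doubling every second digit from the right turns the leading 4 into 8 and
# seven of the 1s into 2s (8 + 14), and the untouched digits add eight 1s, so
# the total is 30 == 0 (mod 10) and the number passes. A compact cross-check
# implementation of the same rule:
def luhn_ok(number):
    digits = [int(d) for d in number]
    # 2*d > 9 folds to 2*d - 9, the digit sum of the two-digit product
    doubled = sum(d * 2 - 9 if d * 2 > 9 else d * 2 for d in digits[-2::-2])
    return (doubled + sum(digits[-1::-2])) % 10 == 0

assert luhn_ok("4111111111111111")
assert not luhn_ok("4111111111111112")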
| 203 | 1 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def UpperCAmelCase_ ( ) -> Optional[Any]:
"""simple docstring"""
_lowercase =argparse.ArgumentParser()
parser.add_argument(
'''-m''' , '''--pretrained_model_name_or_path''' , type=__snake_case , default=__snake_case , required=__snake_case , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , )
parser.add_argument(
'''-c''' , '''--caption''' , type=__snake_case , default='''robotic cat with wings''' , help='''Text used to generate images.''' , )
parser.add_argument(
'''-n''' , '''--images_num''' , type=__snake_case , default=4 , help='''How much images to generate.''' , )
parser.add_argument(
'''-s''' , '''--seed''' , type=__snake_case , default=42 , help='''Seed for random process.''' , )
parser.add_argument(
'''-ci''' , '''--cuda_id''' , type=__snake_case , default=0 , help='''cuda_id.''' , )
_lowercase =parser.parse_args()
return args
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case ) -> Union[str, Any]:
"""simple docstring"""
if not len(__snake_case ) == rows * cols:
        raise ValueError('''The specified number of rows and columns does not match the number of images.''' )
_lowercase , _lowercase =imgs[0].size
_lowercase =Image.new('''RGB''' , size=(cols * w, rows * h) )
_lowercase , _lowercase =grid.size
for i, img in enumerate(__snake_case ):
grid.paste(__snake_case , box=(i % cols * w, i // cols * h) )
return grid
def UpperCAmelCase_ ( __snake_case , __snake_case="robotic cat with wings" , __snake_case=7.5 , __snake_case=50 , __snake_case=1 , __snake_case=42 , ) -> Optional[int]:
"""simple docstring"""
_lowercase =torch.Generator(pipeline.device ).manual_seed(__snake_case )
_lowercase =pipeline(
__snake_case , guidance_scale=__snake_case , num_inference_steps=__snake_case , generator=__snake_case , num_images_per_prompt=__snake_case , ).images
_lowercase =int(math.sqrt(__snake_case ) )
_lowercase =image_grid(__snake_case , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
UpperCAmelCase__ = parse_args()
# Load models and create wrapper for stable diffusion
UpperCAmelCase__ = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''')
UpperCAmelCase__ = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''')
UpperCAmelCase__ = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''')
UpperCAmelCase__ = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''')
UpperCAmelCase__ = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
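# The lambda below (originally assigned to pipeline.safety_checker) bypasses
# the safety checker by returning the images unchanged with has_nsfw=False.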
UpperCAmelCase__ = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')):
UpperCAmelCase__ = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, '''unet''', unet)
else:
UpperCAmelCase__ = unet.to(torch.device('''cuda''', args.cuda_id))
UpperCAmelCase__ = pipeline.to(unet.device)
UpperCAmelCase__ ,UpperCAmelCase__ = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split()))))
UpperCAmelCase__ = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
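# Standalone check of the row-major tiling math used by image_grid above:
# image i lands at column i % cols and row i // cols. Toy sizes and colors
# only, unrelated to the diffusion outputs.
from PIL import Image

cols, rows, w, h = 3, 2, 10, 10
tiles = [Image.new("RGB", (w, h), color=(40 * i, 0, 0)) for i in range(rows * cols)]
canvas = Image.new("RGB", (cols * w, rows * h))
for i, tile in enumerate(tiles):
    canvas.paste(tile, box=(i % cols * w, i // cols * h))

for i in range(rows * cols):
    # the top-left pixel of cell i carries that tile's color
    assert canvas.getpixel((i % cols * w, i // cols * h)) == (40 * i, 0, 0)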
| 5 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __UpperCAmelCase ( A__ , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = BioGptTokenizer
__lowerCAmelCase = False
def A (self : int ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
A = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
A = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(_lowerCAmelCase ) )
def A (self : Tuple , _lowerCAmelCase : List[str] ):
A = """lower newer"""
A = """lower newer"""
return input_text, output_text
def A (self : List[Any] ):
A = BioGptTokenizer(self.vocab_file , self.merges_file )
A = """lower"""
A = ["""low""", """er</w>"""]
A = tokenizer.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
A = tokens + ["""<unk>"""]
A = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , _lowerCAmelCase )
@slow
def A (self : Union[str, Any] ):
A = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
A = tokenizer.encode("""sequence builders""" , add_special_tokens=_lowerCAmelCase )
A = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_lowerCAmelCase )
A = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase )
A = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase , _lowerCAmelCase )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 258 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__magic_name__: List[Any] = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__: Any = [
"VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMAEForPreTraining",
"ViTMAELayer",
"ViTMAEModel",
"ViTMAEPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__: Any = [
"TFViTMAEForPreTraining",
"TFViTMAEModel",
"TFViTMAEPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
__magic_name__: str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
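# The _LazyModule indirection above defers heavy imports until first attribute
# access. Outside transformers, module-level __getattr__ (PEP 562, Python
# 3.7+) gives the same effect; a minimal sketch for a package __init__.py,
# with purely illustrative names:
import importlib

_SUBMODULE_BY_ATTR = {
    "HeavyModel": ".modeling",        # i.e. mypkg.modeling.HeavyModel
    "HeavyConfig": ".configuration",  # i.e. mypkg.configuration.HeavyConfig
}

def __getattr__(name):
    try:
        module = importlib.import_module(_SUBMODULE_BY_ATTR[name], __name__)
    except KeyError:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}") from None
    return getattr(module, name)

def __dir__():
    return sorted(list(globals()) + list(_SUBMODULE_BY_ATTR))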
| 364 |
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
__magic_name__: str = logging.getLogger(__name__)
def UpperCamelCase ( ):
"""simple docstring"""
__magic_name__ : int = argparse.ArgumentParser(
description="""Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).""" )
parser.add_argument("""--file_path""", type=_A, default="""data/dump.txt""", help="""The path to the data.""" )
parser.add_argument("""--tokenizer_type""", type=_A, default="""bert""", choices=["""bert""", """roberta""", """gpt2"""] )
parser.add_argument("""--tokenizer_name""", type=_A, default="""bert-base-uncased""", help="""The tokenizer to use.""" )
parser.add_argument("""--dump_file""", type=_A, default="""data/dump""", help="""The dump file prefix.""" )
__magic_name__ : Dict = parser.parse_args()
logger.info(f'Loading Tokenizer ({args.tokenizer_name})' )
if args.tokenizer_type == "bert":
__magic_name__ : Tuple = BertTokenizer.from_pretrained(args.tokenizer_name )
__magic_name__ : List[Any] = tokenizer.special_tokens_map["""cls_token"""] # `[CLS]`
__magic_name__ : Optional[int] = tokenizer.special_tokens_map["""sep_token"""] # `[SEP]`
elif args.tokenizer_type == "roberta":
__magic_name__ : Optional[int] = RobertaTokenizer.from_pretrained(args.tokenizer_name )
__magic_name__ : List[Any] = tokenizer.special_tokens_map["""cls_token"""] # `<s>`
__magic_name__ : Any = tokenizer.special_tokens_map["""sep_token"""] # `</s>`
elif args.tokenizer_type == "gpt2":
__magic_name__ : Any = GPTaTokenizer.from_pretrained(args.tokenizer_name )
__magic_name__ : int = tokenizer.special_tokens_map["""bos_token"""] # `<|endoftext|>`
__magic_name__ : Optional[int] = tokenizer.special_tokens_map["""eos_token"""] # `<|endoftext|>`
logger.info(f'Loading text from {args.file_path}' )
with open(args.file_path, """r""", encoding="""utf8""" ) as fp:
__magic_name__ : Tuple = fp.readlines()
logger.info("""Start encoding""" )
logger.info(f'{len(_A )} examples to process.' )
__magic_name__ : List[Any] = []
__magic_name__ : str = 0
__magic_name__ : str = 10000
__magic_name__ : Dict = time.time()
for text in data:
__magic_name__ : Tuple = f'{bos} {text.strip()} {sep}'
__magic_name__ : Optional[int] = tokenizer.encode(_A, add_special_tokens=_A )
rslt.append(_A )
iter += 1
if iter % interval == 0:
__magic_name__ : Union[str, Any] = time.time()
logger.info(f'{iter} examples processed. - {(end-start):.2f}s/{interval}expl' )
__magic_name__ : Any = time.time()
logger.info("""Finished binarization""" )
logger.info(f'{len(_A )} examples processed.' )
__magic_name__ : Tuple = f'{args.dump_file}.{args.tokenizer_name}.pickle'
__magic_name__ : Tuple = tokenizer.vocab_size
if vocab_size < (1 << 16):
__magic_name__ : Optional[int] = [np.uintaa(_A ) for d in rslt]
else:
__magic_name__ : str = [np.intaa(_A ) for d in rslt]
random.shuffle(rslt_ )
logger.info(f'Dump to {dp_file}' )
with open(_A, """wb""" ) as handle:
pickle.dump(rslt_, _A, protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
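# The vocab_size < (1 << 16) branch above picks the narrowest dtype that can
# hold every token id, shrinking the pickle roughly 4x versus int64. The
# sizes below are a back-of-the-envelope illustration:
import numpy as np

seq = np.arange(512)  # one tokenized example with ids < 65536
assert seq.astype(np.int64).nbytes == 4096   # 8 bytes per id
assert seq.astype(np.uint16).nbytes == 1024  # 2 bytes per id
# uint16 wraps modulo 2**16 on cast, hence the explicit vocab-size guard:
assert np.array([65536]).astype(np.uint16)[0] == 0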
| 138 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCamelCase__ = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 181 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase_ ( __a ):
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_A , '''embed_dim''' ) )
self.parent.assertTrue(hasattr(_A , '''num_heads''' ) )
class lowerCamelCase_ :
def __init__( self : int , _A : Tuple , _A : Any=13 , _A : Optional[int]=64 , _A : Optional[Any]=3 , _A : List[str]=[16, 48, 96] , _A : int=[1, 3, 6] , _A : Optional[int]=[1, 2, 10] , _A : int=[7, 3, 3] , _A : Union[str, Any]=[4, 2, 2] , _A : Dict=[2, 1, 1] , _A : Optional[Any]=[2, 2, 2] , _A : Optional[Any]=[False, False, True] , _A : List[Any]=[0.0, 0.0, 0.0] , _A : str=0.0_2 , _A : Tuple=1e-12 , _A : Union[str, Any]=True , _A : Optional[Any]=True , _A : Optional[int]=2 , ):
'''simple docstring'''
UpperCAmelCase__ : Dict = parent
UpperCAmelCase__ : List[str] = batch_size
UpperCAmelCase__ : Optional[int] = image_size
UpperCAmelCase__ : List[str] = patch_sizes
UpperCAmelCase__ : Any = patch_stride
UpperCAmelCase__ : Tuple = patch_padding
UpperCAmelCase__ : int = is_training
UpperCAmelCase__ : Dict = use_labels
UpperCAmelCase__ : List[Any] = num_labels
UpperCAmelCase__ : Optional[Any] = num_channels
UpperCAmelCase__ : Optional[int] = embed_dim
UpperCAmelCase__ : int = num_heads
UpperCAmelCase__ : Any = stride_kv
UpperCAmelCase__ : str = depth
UpperCAmelCase__ : List[Any] = cls_token
UpperCAmelCase__ : List[Any] = attention_drop_rate
UpperCAmelCase__ : Optional[int] = initializer_range
UpperCAmelCase__ : Optional[int] = layer_norm_eps
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Any = None
if self.use_labels:
# create a random int32 tensor of given shape
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase__ : List[Any] = self.get_config()
return config, pixel_values, labels
def lowercase_ ( self : Any ):
'''simple docstring'''
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def lowercase_ ( self : Optional[int] , _A : List[Any] , _A : Tuple , _A : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = TFCvtModel(config=_A )
UpperCAmelCase__ : List[str] = model(_A , training=_A )
UpperCAmelCase__ : int = (self.image_size, self.image_size)
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
UpperCAmelCase__ : Union[str, Any] = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
UpperCAmelCase__ : Optional[Any] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def lowercase_ ( self : Optional[Any] , _A : Optional[Any] , _A : List[Any] , _A : int ):
'''simple docstring'''
UpperCAmelCase__ : str = self.num_labels
UpperCAmelCase__ : Union[str, Any] = TFCvtForImageClassification(_A )
UpperCAmelCase__ : Any = model(_A , labels=_A , training=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = config_and_inputs
UpperCAmelCase__ : Optional[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
lowerCAmelCase__ = (
{'feature-extraction': TFCvtModel, 'image-classification': TFCvtForImageClassification}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = TFCvtModelTester(self )
UpperCAmelCase__ : Tuple = TFCvtConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )
def lowercase_ ( self : Any ):
'''simple docstring'''
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason='''Cvt does not output attentions''' )
def lowercase_ ( self : Any ):
'''simple docstring'''
pass
@unittest.skip(reason='''Cvt does not use inputs_embeds''' )
def lowercase_ ( self : str ):
'''simple docstring'''
pass
@unittest.skip(reason='''Cvt does not support input and output embeddings''' )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
@slow
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
super().test_keras_fit()
@unittest.skip(reason='''Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8''' )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = tf.keras.mixed_precision.Policy('''mixed_float16''' )
tf.keras.mixed_precision.set_global_policy(_A )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy('''float32''' )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : str = model_class(_A )
UpperCAmelCase__ : Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : List[Any] = [*signature.parameters.keys()]
UpperCAmelCase__ : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
def lowercase_ ( self : Any ):
'''simple docstring'''
def check_hidden_states_output(_A : Dict , _A : Optional[Any] , _A : Dict ):
UpperCAmelCase__ : str = model_class(_A )
UpperCAmelCase__ : List[str] = model(**self._prepare_for_class(_A , _A ) )
UpperCAmelCase__ : Tuple = outputs.hidden_states
UpperCAmelCase__ : int = len(self.model_tester.depth )
self.assertEqual(len(_A ) , _A )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Tuple = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ : List[str] = True
check_hidden_states_output(_A , _A , _A )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Optional[int] = TFCvtModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def a__ ( ) -> Any:
UpperCAmelCase__ : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Dict = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
UpperCAmelCase__ : Union[str, Any] = self.default_image_processor
UpperCAmelCase__ : Optional[Any] = prepare_img()
UpperCAmelCase__ : Tuple = image_processor(images=_A , return_tensors='''tf''' )
# forward pass
UpperCAmelCase__ : Optional[Any] = model(**_A )
# verify the logits
UpperCAmelCase__ : Union[str, Any] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , _A )
UpperCAmelCase__ : Union[str, Any] = tf.constant([0.9_2_8_5, 0.9_0_1_5, -0.3_1_5_0] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _A , atol=1e-4 ) )
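# Standalone note on the hidden-state assertion above: CvT's first stage embeds patches
# with a stride-4 convolution, so the first hidden state has shape
# (embed_dim[0], image_size // 4, image_size // 4). The helper below just restates that
# arithmetic with illustrative numbers, not the real model configuration.
def first_stage_shape(image_size: int, embed_dim: int, patch_stride: int = 4) -> tuple:
    return (embed_dim, image_size // patch_stride, image_size // patch_stride)

assert first_stage_shape(32, 64) == (64, 8, 8)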
| 181 | 1 |
import math
def is_prime( number: int )-> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution( ratio: float = 0.1 )-> int:
    primes : int = 3
    j : int = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
            primes += is_prime(i )
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
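# Quick sanity checks for the 6k +/- 1 primality test above (the snippet appears to
# implement Project Euler problem 58, the prime ratio of spiral diagonals). These asserts
# are illustrative additions, not part of the original module.
assert is_prime(2) and is_prime(3) and is_prime(97)
assert not is_prime(1) and not is_prime(91)  # 91 = 7 * 13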
| 363 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Tuple =RobertaTokenizer
a_ : Tuple =RobertaTokenizerFast
a_ : Union[str, Any] =True
a_ : List[Any] ={"""cls_token""": """<s>"""}
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_snake_case : str = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
_snake_case : Optional[int] = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
_snake_case : List[str] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
_snake_case : List[str] = {'unk_token': '<unk>'}
_snake_case : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_snake_case : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(UpperCamelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(UpperCamelCase ) )
def UpperCamelCase_ ( self : List[str] , **UpperCamelCase : int ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase_ ( self : Optional[int] , **UpperCamelCase : List[Any] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = 'lower newer'
_snake_case : int = 'lower newer'
return input_text, output_text
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : List[Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
_snake_case : List[str] = 'lower newer'
_snake_case : List[str] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
_snake_case : Any = tokenizer.tokenize(UpperCamelCase ) # , add_prefix_space=True)
self.assertListEqual(UpperCamelCase , UpperCamelCase )
_snake_case : Any = tokens + [tokenizer.unk_token]
_snake_case : Dict = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , UpperCamelCase )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Any = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=UpperCamelCase ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=UpperCamelCase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Dict = self.tokenizer_class.from_pretrained('roberta-base' )
_snake_case : Tuple = tokenizer.encode('sequence builders' , add_special_tokens=UpperCamelCase )
_snake_case : int = tokenizer.encode('multi-sequence build' , add_special_tokens=UpperCamelCase )
_snake_case : Dict = tokenizer.encode(
'sequence builders' , add_special_tokens=UpperCamelCase , add_prefix_space=UpperCamelCase )
_snake_case : Optional[int] = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=UpperCamelCase , add_prefix_space=UpperCamelCase )
_snake_case : List[str] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase )
_snake_case : Tuple = tokenizer.build_inputs_with_special_tokens(UpperCamelCase , UpperCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : Optional[Any] = self.get_tokenizer()
_snake_case : int = 'Encode this sequence.'
_snake_case : str = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
_snake_case : int = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase , add_prefix_space=UpperCamelCase )
_snake_case : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(UpperCamelCase , UpperCamelCase )
_snake_case : Optional[int] = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase , add_prefix_space=UpperCamelCase )
_snake_case : List[str] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(UpperCamelCase , UpperCamelCase )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
_snake_case : List[Any] = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
_snake_case : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(UpperCamelCase , UpperCamelCase )
# Testing spaces after special tokens
_snake_case : Dict = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase )} ) # mask token has a left space
_snake_case : int = tokenizer.convert_tokens_to_ids(UpperCamelCase )
_snake_case : List[Any] = 'Encode <mask> sequence'
_snake_case : Any = 'Encode <mask>sequence'
_snake_case : Optional[int] = tokenizer.encode(UpperCamelCase )
_snake_case : str = encoded.index(UpperCamelCase )
_snake_case : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(UpperCamelCase , UpperCamelCase )
_snake_case : Tuple = tokenizer.encode(UpperCamelCase )
_snake_case : Tuple = encoded.index(UpperCamelCase )
_snake_case : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case : Any = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
_snake_case : Union[str, Any] = self.tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
_snake_case : Tuple = 'A, <mask> AllenNLP sentence.'
_snake_case : str = tokenizer_r.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase )
_snake_case : Optional[int] = tokenizer_p.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
            # attention_mask should put 1 everywhere, so its average over the length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
_snake_case : Optional[int] = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
_snake_case : Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
            # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
UpperCamelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
UpperCamelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
_snake_case : int = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase )
_snake_case : Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
_snake_case : Dict = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , UpperCamelCase )
self.assertEqual(post_processor_state['add_prefix_space'] , UpperCamelCase )
self.assertEqual(post_processor_state['trim_offsets'] , UpperCamelCase )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case : List[str] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
_snake_case : Tuple = f"""{text_of_1_token} {text_of_1_token}"""
_snake_case : int = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase )
_snake_case : int = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase ) + 1, len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , )
_snake_case : List[str] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase )
_snake_case : Tuple = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase ) + 1, len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , )
_snake_case : List[Any] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase )
_snake_case : Optional[int] = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase ), len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , )
_snake_case : Any = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase )
_snake_case : Tuple = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase ), len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , )
_snake_case : str = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
_snake_case : List[str] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase )
_snake_case : Dict = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase ) + 1, 1 + len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , )
_snake_case : List[Any] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase )
_snake_case : str = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase ), 1 + len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , )
_snake_case : List[str] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase , use_fast=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase )
_snake_case : Any = tokenizer_r(UpperCamelCase , return_offsets_mapping=UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase ), 1 + len(UpperCamelCase ) + 1 + len(UpperCamelCase )) , )
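# Assumption-free restatement of the offset-mapping arithmetic asserted above, without a
# tokenizer: for the two-word input "hello hello", trim_offsets=True excludes the leading
# space from the second token's character span, while trim_offsets=False keeps it.
def toy_offsets(word: str, trim: bool) -> list:
    first = (0, len(word))
    start = len(word) + 1 if trim else len(word)
    return [first, (start, 2 * len(word) + 1)]

assert toy_offsets("hello", trim=True) == [(0, 5), (6, 11)]
assert toy_offsets("hello", trim=False) == [(0, 5), (5, 11)]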
| 260 | 0 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowerCamelCase__ : Union[str, Any] = False
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Any ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = VersatileDiffusionTextToImagePipeline.from_pretrained('shi-labs/versatile-diffusion' )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = 'A painting of a squirrel eating a burger '
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe(
prompt=_lowerCAmelCase , generator=_lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = VersatileDiffusionTextToImagePipeline.from_pretrained(_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = generator.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe(
prompt=_lowerCAmelCase , generator=_lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = VersatileDiffusionTextToImagePipeline.from_pretrained(
'shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = 'A painting of a squirrel eating a burger '
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe(
prompt=_lowerCAmelCase , generator=_lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
SCREAMING_SNAKE_CASE_ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 225 |
import copy
import random
from transformers import CLIPTokenizer
class a__ ( snake_case ):
"""simple docstring"""
def __init__( self , *lowercase , **lowercase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(*lowercase , **lowercase )
A__ = {}
def UpperCamelCase ( self , lowercase , *lowercase , **lowercase ) -> str:
'''simple docstring'''
A__ = super().add_tokens(lowercase , *lowercase , **lowercase )
if num_added_tokens == 0:
raise ValueError(
F'The tokenizer already contains the token {placeholder_token}. Please pass a different'
" `placeholder_token` that is not already in the tokenizer." )
def UpperCamelCase ( self , lowercase , *lowercase , lowercase=1 , **lowercase ) -> Any:
'''simple docstring'''
A__ = []
if num_vec_per_token == 1:
self.try_adding_tokens(lowercase , *lowercase , **lowercase )
output.append(lowercase )
else:
A__ = []
for i in range(lowercase ):
A__ = placeholder_token + F'_{i}'
self.try_adding_tokens(lowercase , *lowercase , **lowercase )
output.append(lowercase )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F'The tokenizer already has placeholder token {token} that can get confused with'
                    F' {placeholder_token}; keep placeholder tokens independent' )
A__ = output
def UpperCamelCase ( self , lowercase , lowercase=False , lowercase=1.0 ) -> List[Any]:
'''simple docstring'''
if isinstance(lowercase , lowercase ):
A__ = []
for i in range(len(lowercase ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=lowercase ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
A__ = self.token_map[placeholder_token]
A__ = tokens[: 1 + int(len(lowercase ) * prop_tokens_to_load )]
if vector_shuffle:
A__ = copy.copy(lowercase )
random.shuffle(lowercase )
A__ = text.replace(lowercase , " ".join(lowercase ) )
return text
def __call__( self , lowercase , *lowercase , lowercase=False , lowercase=1.0 , **lowercase ) -> str:
'''simple docstring'''
return super().__call__(
self.replace_placeholder_tokens_in_text(
lowercase , vector_shuffle=lowercase , prop_tokens_to_load=lowercase ) , *lowercase , **lowercase , )
def UpperCamelCase ( self , lowercase , *lowercase , lowercase=False , lowercase=1.0 , **lowercase ) -> List[str]:
'''simple docstring'''
return super().encode(
self.replace_placeholder_tokens_in_text(
lowercase , vector_shuffle=lowercase , prop_tokens_to_load=lowercase ) , *lowercase , **lowercase , )
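# Self-contained sketch of the placeholder-expansion logic implemented above, with the
# obfuscated names replaced by descriptive stand-ins. token_map mirrors the per-instance
# mapping built by the add-tokens method; the real class then delegates encoding to
# CLIPTokenizer. "<cat-toy>" and the vector count of 4 are illustrative assumptions.
def replace_placeholders(text: str, token_map: dict) -> str:
    for placeholder, pieces in token_map.items():
        if placeholder in text:
            text = text.replace(placeholder, " ".join(pieces))
    return text

token_map = {"<cat-toy>": [f"<cat-toy>_{i}" for i in range(4)]}
assert (
    replace_placeholders("a photo of <cat-toy>", token_map)
    == "a photo of <cat-toy>_0 <cat-toy>_1 <cat-toy>_2 <cat-toy>_3"
)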
| 68 | 0 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
lowerCAmelCase__ : Tuple = "\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n"
lowerCAmelCase__ : Optional[int] = "\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.\n"
lowerCAmelCase__ : str = R"\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting \"1/2\" to \"\\frac{1}{2}\")\n\nExamples:\n >>> metric = datasets.load_metric(\"competition_math\")\n >>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])\n >>> print(results)\n {'accuracy': 1.0}\n"
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
"""simple docstring"""
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = 0.0
for i, j in zip(_lowerCAmelCase , _lowerCAmelCase ):
n_correct += 1.0 if math_equivalence.is_equiv(_lowerCAmelCase , _lowerCAmelCase ) else 0.0
__UpperCAmelCase : Optional[int] = n_correct / len(_lowerCAmelCase )
return {
"accuracy": accuracy,
}
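# Hedged stand-in for the metric above: the real compute step relies on the external
# math_equivalence helper from the hendrycks/math repo, so this toy version shows only
# the accuracy arithmetic, with a single hard-coded canonicalization ("1/2" ->
# "\frac{1}{2}") instead of full LaTeX equivalence checking.
def toy_accuracy(references, predictions):
    def canon(s: str) -> str:
        return s.replace("1/2", "\\frac{1}{2}")

    correct = sum(canon(r) == canon(p) for r, p in zip(references, predictions))
    return correct / len(references)

assert toy_accuracy(["\\frac{1}{2}"], ["1/2"]) == 1.0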
| 370 |
'''simple docstring'''
from __future__ import annotations
import math
def __UpperCamelCase ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
if depth < 0:
raise ValueError("Depth cannot be less than 0" )
if not scores:
raise ValueError("Scores cannot be empty" )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1, node_index * 2, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ), minimax(depth + 1, node_index * 2 + 1, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ), )
if is_max
else min(
minimax(depth + 1, node_index * 2, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ), minimax(depth + 1, node_index * 2 + 1, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ), )
)
def __UpperCamelCase ( ):
__UpperCAmelCase : Dict = [90, 23, 6, 33, 21, 65, 123, 34423]
__UpperCAmelCase : Optional[Any] = math.log(len(_UpperCAmelCase ), 2 )
print(F"Optimal value : {minimax(0, 0, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )}" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
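# Self-contained restatement of the minimax recursion above with readable names, assuming
# the standard formulation in which the players alternate at each depth (the obfuscated
# placeholders hide whether is_max is flipped in the recursive calls).
def minimax_demo(depth: int, node_index: int, is_max: bool, scores: list, height: int) -> int:
    if depth == height:
        return scores[node_index]
    left = minimax_demo(depth + 1, node_index * 2, not is_max, scores, height)
    right = minimax_demo(depth + 1, node_index * 2 + 1, not is_max, scores, height)
    return max(left, right) if is_max else min(left, right)

demo_scores = [3, 5, 2, 9]  # leaves of a depth-2 binary tree
assert minimax_demo(0, 0, True, demo_scores, 2) == 3  # max(min(3, 5), min(2, 9))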
| 37 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class _SCREAMING_SNAKE_CASE :
_UpperCamelCase:CommonSchedulerState
# setable values
_UpperCamelCase:jnp.ndarray
_UpperCamelCase:jnp.ndarray
_UpperCamelCase:Optional[int] = None
@classmethod
def _snake_case ( cls , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Optional[Any]:
return cls(common=_SCREAMING_SNAKE_CASE , init_noise_sigma=_SCREAMING_SNAKE_CASE , timesteps=_SCREAMING_SNAKE_CASE )
@dataclass
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
_UpperCamelCase:DDPMSchedulerState
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ , lowerCAmelCase__):
_UpperCamelCase:Optional[int] = [e.name for e in FlaxKarrasDiffusionSchedulers]
_UpperCamelCase:jnp.dtype
@property
def _snake_case ( self )-> List[str]:
return True
@register_to_config
def __init__( self , _SCREAMING_SNAKE_CASE = 1000 , _SCREAMING_SNAKE_CASE = 0.0_0_0_1 , _SCREAMING_SNAKE_CASE = 0.0_2 , _SCREAMING_SNAKE_CASE = "linear" , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "fixed_small" , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = "epsilon" , _SCREAMING_SNAKE_CASE = jnp.floataa , )-> Optional[int]:
lowerCamelCase_ =dtype
def _snake_case ( self , _SCREAMING_SNAKE_CASE = None )-> DDPMSchedulerState:
if common is None:
lowerCamelCase_ =CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowerCamelCase_ =jnp.array(1.0 , dtype=self.dtype )
lowerCamelCase_ =jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=_SCREAMING_SNAKE_CASE , init_noise_sigma=_SCREAMING_SNAKE_CASE , timesteps=_SCREAMING_SNAKE_CASE , )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None )-> jnp.ndarray:
return sample
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = () )-> DDPMSchedulerState:
lowerCamelCase_ =self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_steps is a power of 3
lowerCamelCase_ =(jnp.arange(0 , _SCREAMING_SNAKE_CASE ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=_SCREAMING_SNAKE_CASE , timesteps=_SCREAMING_SNAKE_CASE , )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None )-> Dict:
lowerCamelCase_ =state.common.alphas_cumprod[t]
lowerCamelCase_ =jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowerCamelCase_ =(1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowerCamelCase_ =self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowerCamelCase_ =jnp.clip(_SCREAMING_SNAKE_CASE , a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowerCamelCase_ =jnp.log(jnp.clip(_SCREAMING_SNAKE_CASE , a_min=1E-20 ) )
elif variance_type == "fixed_large":
lowerCamelCase_ =state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowerCamelCase_ =jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowerCamelCase_ =variance
lowerCamelCase_ =state.common.betas[t]
lowerCamelCase_ =(predicted_variance + 1) / 2
lowerCamelCase_ =frac * max_log + (1 - frac) * min_log
return variance
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , )-> Union[FlaxDDPMSchedulerOutput, Tuple]:
lowerCamelCase_ =timestep
if key is None:
lowerCamelCase_ =jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowerCamelCase_ , lowerCamelCase_ =jnp.split(_SCREAMING_SNAKE_CASE , sample.shape[1] , axis=1 )
else:
lowerCamelCase_ =None
# 1. compute alphas, betas
lowerCamelCase_ =state.common.alphas_cumprod[t]
lowerCamelCase_ =jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowerCamelCase_ =1 - alpha_prod_t
lowerCamelCase_ =1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowerCamelCase_ =(sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowerCamelCase_ =model_output
elif self.config.prediction_type == "v_prediction":
lowerCamelCase_ =(alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '
""" for the FlaxDDPMScheduler.""" )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowerCamelCase_ =jnp.clip(_SCREAMING_SNAKE_CASE , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCamelCase_ =(alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowerCamelCase_ =state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCamelCase_ =pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowerCamelCase_ =jax.random.split(_SCREAMING_SNAKE_CASE , num=1 )
lowerCamelCase_ =jax.random.normal(_SCREAMING_SNAKE_CASE , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , predicted_variance=_SCREAMING_SNAKE_CASE ) ** 0.5) * noise
lowerCamelCase_ =jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowerCamelCase_ =pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=_SCREAMING_SNAKE_CASE , state=_SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )-> jnp.ndarray:
return add_noise_common(state.common , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )-> jnp.ndarray:
return get_velocity_common(state.common , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __len__( self )-> str:
return self.config.num_train_timesteps
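# Hedged usage sketch for the scheduler above, assuming it corresponds to diffusers'
# public FlaxDDPMScheduler (create_state / set_timesteps / step, matching the method
# bodies in the snippet). The zero tensors stand in for a UNet forward pass and are
# purely illustrative.
import jax
import jax.numpy as jnp
from diffusers import FlaxDDPMScheduler

scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
state = scheduler.create_state()
state = scheduler.set_timesteps(state, num_inference_steps=5, shape=(1, 3, 8, 8))
sample = jnp.zeros((1, 3, 8, 8))
key = jax.random.PRNGKey(0)
for t in state.timesteps:
    model_output = jnp.zeros_like(sample)  # stand-in for unet(sample, t)
    sample, state = scheduler.step(state, model_output, t, sample, key=key, return_dict=False)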
| 154 |
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def __UpperCamelCase ( _A : List[str] , _A : Union[str, Any] , _A : Any , _A : Optional[int] ) ->List[str]:
"""simple docstring"""
lowerCamelCase_ =s.rsplit(_A , _A )
return new.join(_A )
def __UpperCamelCase ( _A : List[Any] ) ->Dict:
"""simple docstring"""
    # encoder.embeddings are double-copied in the original FLAVA
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def __UpperCamelCase ( _A : str ) ->Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ ={}
lowerCamelCase_ =["""group_1""", """group_2""", """group_3""", """group_4"""]
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
lowerCamelCase_ =key.replace(f'{group_key}.' , f'{group_key}.group.' )
if "res_path" in key:
lowerCamelCase_ =key.replace("""res_path.""" , """res_path.path.""" )
if key.endswith(""".w""" ):
lowerCamelCase_ =rreplace(_A , """.w""" , """.weight""" , 1 )
if key.endswith(""".b""" ):
lowerCamelCase_ =rreplace(_A , """.b""" , """.bias""" , 1 )
lowerCamelCase_ =value.float()
return upgrade
@torch.no_grad()
def __UpperCamelCase ( _A : Optional[int] , _A : Union[str, Any] , _A : List[Any]=None , _A : Dict=True ) ->Optional[int]:
"""simple docstring"""
from dall_e import Encoder
lowerCamelCase_ =Encoder()
if os.path.exists(_A ):
lowerCamelCase_ =torch.load(_A )
else:
lowerCamelCase_ =torch.hub.load_state_dict_from_url(_A )
if isinstance(_A , _A ):
lowerCamelCase_ =ckpt.state_dict()
encoder.load_state_dict(_A )
if config_path is not None:
lowerCamelCase_ =FlavaImageCodebookConfig.from_pretrained(_A )
else:
lowerCamelCase_ =FlavaImageCodebookConfig()
lowerCamelCase_ =FlavaImageCodebook(_A ).eval()
lowerCamelCase_ =encoder.state_dict()
lowerCamelCase_ =upgrade_state_dict(_A )
hf_model.load_state_dict(_A )
lowerCamelCase_ =hf_model.state_dict()
lowerCamelCase_ =count_parameters(_A )
lowerCamelCase_ =count_parameters(_A )
assert torch.allclose(_A , _A , atol=1E-3 )
if save_checkpoint:
hf_model.save_pretrained(_A )
else:
return hf_state_dict
if __name__ == "__main__":
__A : Dict = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
__A : List[Any] = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
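# Worked example of the key-upgrade rules applied by upgrade_state_dict above, written
# with descriptive names; the sample key is illustrative, not a real FLAVA weight name.
def upgrade_key(key: str) -> str:
    for group_key in ["group_1", "group_2", "group_3", "group_4"]:
        if group_key in key:
            key = key.replace(f"{group_key}.", f"{group_key}.group.")
    if "res_path" in key:
        key = key.replace("res_path.", "res_path.path.")
    if key.endswith(".w"):
        key = key.rsplit(".w", 1)[0] + ".weight"
    if key.endswith(".b"):
        key = key.rsplit(".b", 1)[0] + ".bias"
    return key

assert upgrade_key("group_1.res_path.0.w") == "group_1.group.res_path.path.0.weight"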
| 154 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@property
def A_ ( self ):
torch.manual_seed(0 )
_lowerCamelCase : Tuple = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def A_ ( self ):
_lowerCamelCase : Optional[int] = self.dummy_uncond_unet
_lowerCamelCase : Union[str, Any] = KarrasVeScheduler()
_lowerCamelCase : Any = KarrasVePipeline(unet=lowercase , scheduler=lowercase )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
_lowerCamelCase : str = torch.manual_seed(0 )
_lowerCamelCase : int = pipe(num_inference_steps=2 , generator=lowercase , output_type='numpy' ).images
_lowerCamelCase : int = torch.manual_seed(0 )
_lowerCamelCase : Optional[int] = pipe(num_inference_steps=2 , generator=lowercase , output_type='numpy' , return_dict=lowercase )[0]
_lowerCamelCase : List[str] = image[0, -3:, -3:, -1]
_lowerCamelCase : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowerCamelCase : List[Any] = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self ):
_lowerCamelCase : Tuple = 'google/ncsnpp-celebahq-256'
_lowerCamelCase : List[Any] = UNetaDModel.from_pretrained(lowercase )
_lowerCamelCase : List[Any] = KarrasVeScheduler()
_lowerCamelCase : Tuple = KarrasVePipeline(unet=lowercase , scheduler=lowercase )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
_lowerCamelCase : Any = torch.manual_seed(0 )
_lowerCamelCase : int = pipe(num_inference_steps=20 , generator=lowercase , output_type='numpy' ).images
_lowerCamelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_lowerCamelCase : Any = np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 12 |
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowercase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = ["""pixel_values"""]
def __init__( self , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = 8 , **lowercase , ):
super().__init__(**lowercase )
_lowerCamelCase : Optional[Any] = do_rescale
_lowerCamelCase : Union[str, Any] = rescale_factor
_lowerCamelCase : Any = do_pad
_lowerCamelCase : Optional[int] = pad_size
def A_ ( self , lowercase , lowercase , lowercase = None , **lowercase ):
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def A_ ( self , lowercase , lowercase , lowercase = None ):
_lowerCamelCase, _lowerCamelCase : Tuple = get_image_size(lowercase )
_lowerCamelCase : Union[str, Any] = (old_height // size + 1) * size - old_height
_lowerCamelCase : Tuple = (old_width // size + 1) * size - old_width
return pad(lowercase , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=lowercase )
def A_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ):
_lowerCamelCase : List[str] = do_rescale if do_rescale is not None else self.do_rescale
_lowerCamelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCamelCase : Any = do_pad if do_pad is not None else self.do_pad
_lowerCamelCase : int = pad_size if pad_size is not None else self.pad_size
_lowerCamelCase : Dict = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
_lowerCamelCase : Dict = [to_numpy_array(lowercase ) for image in images]
if do_rescale:
_lowerCamelCase : str = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_pad:
_lowerCamelCase : str = [self.pad(lowercase , size=lowercase ) for image in images]
_lowerCamelCase : Any = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
_lowerCamelCase : Union[str, Any] = {'pixel_values': images}
        return BatchFeature(data=lowercase , tensor_type=lowercase )
| 12 | 1 |
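# Standalone illustration of the symmetric padding computed by the image processor just
# above: each spatial side grows to the next multiple of pad_size (strictly larger, per
# the (old // size + 1) * size - old arithmetic). Assumes an HWC numpy image.
import numpy as np

def pad_to_multiple(image: np.ndarray, size: int = 8) -> np.ndarray:
    old_h, old_w = image.shape[:2]
    pad_h = (old_h // size + 1) * size - old_h
    pad_w = (old_w // size + 1) * size - old_w
    return np.pad(image, ((0, pad_h), (0, pad_w), (0, 0)), mode="symmetric")

assert pad_to_multiple(np.zeros((20, 13, 3))).shape == (24, 16, 3)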
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _snake_case ( metaclass=a__ ):
lowerCAmelCase :Optional[Any] = ['''torch''', '''scipy''']
def __init__( self , *_lowerCamelCase , **_lowerCamelCase):
requires_backends(self , ["""torch""", """scipy"""])
@classmethod
def snake_case__ ( cls , *_lowerCamelCase , **_lowerCamelCase):
requires_backends(cls , ["""torch""", """scipy"""])
@classmethod
def snake_case__ ( cls , *_lowerCamelCase , **_lowerCamelCase):
        requires_backends(cls , ["""torch""", """scipy"""])
| 163 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def _UpperCamelCase ( UpperCamelCase__ ):
UpperCAmelCase__ : Union[str, Any] = 3_8_4
if "tiny" in model_name:
UpperCAmelCase__ : int = [3, 3, 9, 3]
UpperCAmelCase__ : Union[str, Any] = [9_6, 1_9_2, 3_8_4, 7_6_8]
if "small" in model_name:
UpperCAmelCase__ : Optional[int] = [3, 3, 2_7, 3]
UpperCAmelCase__ : Dict = [9_6, 1_9_2, 3_8_4, 7_6_8]
if "base" in model_name:
UpperCAmelCase__ : List[str] = [3, 3, 2_7, 3]
UpperCAmelCase__ : str = [1_2_8, 2_5_6, 5_1_2, 1_0_2_4]
UpperCAmelCase__ : Optional[int] = 5_1_2
if "large" in model_name:
UpperCAmelCase__ : Optional[Any] = [3, 3, 2_7, 3]
UpperCAmelCase__ : Optional[int] = [1_9_2, 3_8_4, 7_6_8, 1_5_3_6]
UpperCAmelCase__ : Optional[int] = 7_6_8
if "xlarge" in model_name:
UpperCAmelCase__ : Tuple = [3, 3, 2_7, 3]
UpperCAmelCase__ : int = [2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8]
UpperCAmelCase__ : int = 1_0_2_4
# set label information
UpperCAmelCase__ : Tuple = 1_5_0
UpperCAmelCase__ : Union[str, Any] = """huggingface/label-files"""
UpperCAmelCase__ : Tuple = """ade20k-id2label.json"""
UpperCAmelCase__ : Dict = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
UpperCAmelCase__ : str = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
UpperCAmelCase__ : Union[str, Any] = {v: k for k, v in idalabel.items()}
UpperCAmelCase__ : str = ConvNextConfig(
depths=UpperCamelCase__ , hidden_sizes=UpperCamelCase__ , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
UpperCAmelCase__ : Union[str, Any] = UperNetConfig(
backbone_config=UpperCamelCase__ , auxiliary_in_channels=UpperCamelCase__ , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ , )
return config
def _UpperCamelCase ( UpperCamelCase__ ):
UpperCAmelCase__ : str = []
# fmt: off
# stem
rename_keys.append(("""backbone.downsample_layers.0.0.weight""", """backbone.embeddings.patch_embeddings.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.0.bias""", """backbone.embeddings.patch_embeddings.bias""") )
rename_keys.append(("""backbone.downsample_layers.0.1.weight""", """backbone.embeddings.layernorm.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.1.bias""", """backbone.embeddings.layernorm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase__ : Tuple = dct.pop(UpperCamelCase__ )
UpperCAmelCase__ : Dict = val
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase__ : List[str] = {
"""upernet-convnext-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth""",
"""upernet-convnext-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth""",
"""upernet-convnext-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth""",
"""upernet-convnext-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth""",
"""upernet-convnext-xlarge""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth""",
}
UpperCAmelCase__ : Tuple = model_name_to_url[model_name]
UpperCAmelCase__ : int = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location="""cpu""" )["""state_dict"""]
UpperCAmelCase__ : int = get_upernet_config(UpperCamelCase__ )
UpperCAmelCase__ : Tuple = UperNetForSemanticSegmentation(UpperCamelCase__ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
UpperCAmelCase__ : Optional[Any] = state_dict.pop(UpperCamelCase__ )
if "bn" in key:
UpperCAmelCase__ : List[str] = key.replace("""bn""" , """batch_norm""" )
UpperCAmelCase__ : List[Any] = val
# rename keys
UpperCAmelCase__ : Any = create_rename_keys(UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
# verify on image
UpperCAmelCase__ : Optional[int] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
UpperCAmelCase__ : Optional[Any] = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert("""RGB""" )
UpperCAmelCase__ : Optional[int] = SegformerImageProcessor()
UpperCAmelCase__ : Dict = processor(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
with torch.no_grad():
UpperCAmelCase__ : Dict = model(UpperCamelCase__ )
if model_name == "upernet-convnext-tiny":
UpperCAmelCase__ : Any = torch.tensor(
[[-8.81_10, -8.81_10, -8.65_21], [-8.81_10, -8.81_10, -8.65_21], [-8.77_46, -8.77_46, -8.61_30]] )
elif model_name == "upernet-convnext-small":
UpperCAmelCase__ : Dict = torch.tensor(
[[-8.82_36, -8.82_36, -8.67_71], [-8.82_36, -8.82_36, -8.67_71], [-8.76_38, -8.76_38, -8.62_40]] )
elif model_name == "upernet-convnext-base":
UpperCAmelCase__ : Optional[Any] = torch.tensor(
[[-8.85_58, -8.85_58, -8.69_05], [-8.85_58, -8.85_58, -8.69_05], [-8.76_69, -8.76_69, -8.60_21]] )
elif model_name == "upernet-convnext-large":
UpperCAmelCase__ : str = torch.tensor(
[[-8.66_60, -8.66_60, -8.62_10], [-8.66_60, -8.66_60, -8.62_10], [-8.63_10, -8.63_10, -8.59_64]] )
elif model_name == "upernet-convnext-xlarge":
UpperCAmelCase__ : List[str] = torch.tensor(
[[-8.49_80, -8.49_80, -8.39_77], [-8.49_80, -8.49_80, -8.39_77], [-8.43_79, -8.43_79, -8.34_12]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCamelCase__ , atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCamelCase__ )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(UpperCamelCase__ )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[f"""upernet-convnext-{size}""" for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__A =parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 163 | 1 |
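# Toy demonstration of the rename_key pattern used in the conversion above: pop the
# tensor under its old name and reinsert it under the new one. The two entries below are
# illustrative; real UperNet state dicts hold tensors, not floats.
toy_state = {"backbone.norm1.weight": 1.0, "decode_head.conv_seg.bias": 2.0}
for old, new in [
    ("backbone.norm1.weight", "backbone.hidden_states_norms.stage2.weight"),
    ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
]:
    toy_state[new] = toy_state.pop(old)
assert "backbone.hidden_states_norms.stage2.weight" in toy_state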
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class UpperCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case : Any = XLMProphetNetTokenizer
__snake_case : Union[str, Any] = False
__snake_case : Optional[int] = True
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE = XLMProphetNetTokenizer(lowerCamelCase__ ,keep_accents=lowerCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """[PAD]"""
SCREAMING_SNAKE_CASE = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) ,lowerCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"""[PAD]""" )
self.assertEqual(vocab_keys[1] ,"""[CLS]""" )
self.assertEqual(vocab_keys[-1] ,"""j""" )
self.assertEqual(len(lowerCamelCase__ ) ,1012 )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,1012 )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = XLMProphetNetTokenizer(lowerCamelCase__ ,keep_accents=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCamelCase__ ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,)
SCREAMING_SNAKE_CASE = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCamelCase__ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] ,)
SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] ,)
SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""[UNK]""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""[UNK]""",
""".""",
] ,)
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
return XLMProphetNetTokenizer.from_pretrained("""microsoft/xprophetnet-large-wiki100-cased""" )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """Hello World!"""
SCREAMING_SNAKE_CASE = [35389, 6672, 49, 2]
self.assertListEqual(lowerCamelCase__ ,self.big_tokenizer.encode(lowerCamelCase__ ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {"""input_ids""": [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__ ,model_name="""microsoft/xprophetnet-large-wiki100-cased""" ,revision="""1acad1643ddd54a44df6a1b797ada8373685d90e""" ,)
| 368 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE_ = """RegNetConfig"""
# Base docstring
SCREAMING_SNAKE_CASE_ = """facebook/regnet-y-040"""
SCREAMING_SNAKE_CASE_ = [1, 1_0_8_8, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE_ = """facebook/regnet-y-040"""
SCREAMING_SNAKE_CASE_ = """tabby, tabby cat"""
SCREAMING_SNAKE_CASE_ = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class UpperCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : str ,lowerCamelCase__ : int ,lowerCamelCase__ : int ,lowerCamelCase__ : int = 3 ,lowerCamelCase__ : int = 1 ,lowerCamelCase__ : int = 1 ,lowerCamelCase__ : Optional[str] = "relu" ,) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE = nn.Convad(
lowerCamelCase__ ,lowerCamelCase__ ,kernel_size=lowerCamelCase__ ,stride=lowerCamelCase__ ,padding=kernel_size // 2 ,groups=lowerCamelCase__ ,bias=lowerCamelCase__ ,)
SCREAMING_SNAKE_CASE = nn.BatchNormad(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = ACTaFN[activation] if activation is not None else nn.Identity()
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ,lowerCamelCase__ : Tuple ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.convolution(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.normalization(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.activation(lowerCamelCase__ )
return hidden_state
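# Minimal sketch (illustrative) of the conv -> batch-norm -> activation block above,
# written with the real torch names the obfuscated aliases appear to stand in for
# (nn.Conv2d / nn.BatchNorm2d); shapes are example values, not taken from the config.
import torch
from torch import nn

conv = nn.Conv2d(3, 32, kernel_size=3, stride=2, padding=3 // 2, groups=1, bias=False)
norm = nn.BatchNorm2d(32)
act = nn.ReLU()
out = act(norm(conv(torch.randn(1, 3, 224, 224))))  # shape: (1, 32, 112, 112)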
class UpperCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] ,lowerCamelCase__ : RegNetConfig ) -> List[str]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE = RegNetConvLayer(
config.num_channels ,config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act )
SCREAMING_SNAKE_CASE = config.num_channels
def SCREAMING_SNAKE_CASE__ ( self : List[str] ,lowerCamelCase__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
SCREAMING_SNAKE_CASE = self.embedder(lowerCamelCase__ )
return hidden_state
class UpperCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : int ,lowerCamelCase__ : int = 2 ) -> List[str]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE = nn.Convad(lowerCamelCase__ ,lowerCamelCase__ ,kernel_size=1 ,stride=lowerCamelCase__ ,bias=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = nn.BatchNormad(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : str ,lowerCamelCase__ : Tensor ) -> Tensor:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.convolution(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.normalization(lowerCamelCase__ )
return hidden_state
class UpperCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : int ) -> int:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE = nn.AdaptiveAvgPoolad((1, 1) )
SCREAMING_SNAKE_CASE = nn.Sequential(
nn.Convad(lowerCamelCase__ ,lowerCamelCase__ ,kernel_size=1 ) ,nn.ReLU() ,nn.Convad(lowerCamelCase__ ,lowerCamelCase__ ,kernel_size=1 ) ,nn.Sigmoid() ,)
def SCREAMING_SNAKE_CASE__ ( self : List[str] ,lowerCamelCase__ : Any ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.pooler(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.attention(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = hidden_state * attention
return hidden_state
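# Minimal squeeze-and-excitation sketch (illustrative, using real torch names): pool
# the feature map to 1x1, compute per-channel gates with two pointwise convolutions,
# then rescale the input channel-wise. Channel counts here are example values.
import torch
from torch import nn

x = torch.randn(1, 64, 56, 56)
pooled = nn.AdaptiveAvgPool2d((1, 1))(x)  # (1, 64, 1, 1)
gate = nn.Sequential(
    nn.Conv2d(64, 16, kernel_size=1), nn.ReLU(),
    nn.Conv2d(16, 64, kernel_size=1), nn.Sigmoid(),
)(pooled)
out = x * gate  # broadcast channel-wise rescaling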
class UpperCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] ,lowerCamelCase__ : RegNetConfig ,lowerCamelCase__ : int ,lowerCamelCase__ : int ,lowerCamelCase__ : int = 1 ) -> str:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE = in_channels != out_channels or stride != 1
SCREAMING_SNAKE_CASE = max(1 ,out_channels // config.groups_width )
SCREAMING_SNAKE_CASE = (
RegNetShortCut(lowerCamelCase__ ,lowerCamelCase__ ,stride=lowerCamelCase__ ) if should_apply_shortcut else nn.Identity()
)
SCREAMING_SNAKE_CASE = nn.Sequential(
RegNetConvLayer(lowerCamelCase__ ,lowerCamelCase__ ,kernel_size=1 ,activation=config.hidden_act ) ,RegNetConvLayer(lowerCamelCase__ ,lowerCamelCase__ ,stride=lowerCamelCase__ ,groups=lowerCamelCase__ ,activation=config.hidden_act ) ,RegNetConvLayer(lowerCamelCase__ ,lowerCamelCase__ ,kernel_size=1 ,activation=lowerCamelCase__ ) ,)
SCREAMING_SNAKE_CASE = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE__ ( self : Dict ,lowerCamelCase__ : Optional[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = hidden_state
SCREAMING_SNAKE_CASE = self.layer(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.shortcut(lowerCamelCase__ )
hidden_state += residual
SCREAMING_SNAKE_CASE = self.activation(lowerCamelCase__ )
return hidden_state
class UpperCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict ,lowerCamelCase__ : RegNetConfig ,lowerCamelCase__ : int ,lowerCamelCase__ : int ,lowerCamelCase__ : int = 1 ) -> Optional[int]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE = in_channels != out_channels or stride != 1
SCREAMING_SNAKE_CASE = max(1 ,out_channels // config.groups_width )
SCREAMING_SNAKE_CASE = (
RegNetShortCut(lowerCamelCase__ ,lowerCamelCase__ ,stride=lowerCamelCase__ ) if should_apply_shortcut else nn.Identity()
)
SCREAMING_SNAKE_CASE = nn.Sequential(
RegNetConvLayer(lowerCamelCase__ ,lowerCamelCase__ ,kernel_size=1 ,activation=config.hidden_act ) ,RegNetConvLayer(lowerCamelCase__ ,lowerCamelCase__ ,stride=lowerCamelCase__ ,groups=lowerCamelCase__ ,activation=config.hidden_act ) ,RegNetSELayer(lowerCamelCase__ ,reduced_channels=int(round(in_channels / 4 ) ) ) ,RegNetConvLayer(lowerCamelCase__ ,lowerCamelCase__ ,kernel_size=1 ,activation=lowerCamelCase__ ) ,)
SCREAMING_SNAKE_CASE = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE__ ( self : Dict ,lowerCamelCase__ : Tuple ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = hidden_state
SCREAMING_SNAKE_CASE = self.layer(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.shortcut(lowerCamelCase__ )
hidden_state += residual
SCREAMING_SNAKE_CASE = self.activation(lowerCamelCase__ )
return hidden_state
class UpperCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : int ,lowerCamelCase__ : RegNetConfig ,lowerCamelCase__ : int ,lowerCamelCase__ : int ,lowerCamelCase__ : int = 2 ,lowerCamelCase__ : int = 2 ,) -> Tuple:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE = RegNetXLayer if config.layer_type == """x""" else RegNetYLayer
SCREAMING_SNAKE_CASE = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,stride=lowerCamelCase__ ,) ,*[layer(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ) for _ in range(depth - 1 )] ,)
def SCREAMING_SNAKE_CASE__ ( self : Dict ,lowerCamelCase__ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.layers(lowerCamelCase__ )
return hidden_state
class UpperCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] ,lowerCamelCase__ : RegNetConfig ) -> str:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
lowerCamelCase__ ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,) )
SCREAMING_SNAKE_CASE = zip(config.hidden_sizes ,config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowerCamelCase__ ,config.depths[1:] ):
self.stages.append(RegNetStage(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,depth=lowerCamelCase__ ) )
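# Illustrative note: the zip above pairs consecutive hidden sizes, e.g. for
# hidden_sizes = [96, 192, 432, 1088] it would yield (96, 192), (192, 432),
# (432, 1088) -- the (in, out) channel pairs for each stage after the first.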
def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : Tensor ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = True ) -> BaseModelOutputWithNoAttention:
'''simple docstring'''
SCREAMING_SNAKE_CASE = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
SCREAMING_SNAKE_CASE = hidden_states + (hidden_state,)
SCREAMING_SNAKE_CASE = stage_module(lowerCamelCase__ )
if output_hidden_states:
SCREAMING_SNAKE_CASE = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=lowerCamelCase__ ,hidden_states=lowerCamelCase__ )
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : List[Any] = RegNetConfig
__snake_case : Union[str, Any] = "regnet"
__snake_case : Optional[Any] = "pixel_values"
__snake_case : List[Any] = True
def SCREAMING_SNAKE_CASE__ ( self : List[str] ,lowerCamelCase__ : int ) -> Any:
'''simple docstring'''
if isinstance(lowerCamelCase__ ,nn.Convad ):
nn.init.kaiming_normal_(module.weight ,mode="""fan_out""" ,nonlinearity="""relu""" )
elif isinstance(lowerCamelCase__ ,(nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight ,1 )
nn.init.constant_(module.bias ,0 )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : str=False ) -> str:
'''simple docstring'''
if isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
SCREAMING_SNAKE_CASE = value
SCREAMING_SNAKE_CASE_ = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
SCREAMING_SNAKE_CASE_ = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , lowerCAmelCase_ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self : str ,lowerCamelCase__ : str ) -> Any:
'''simple docstring'''
super().__init__(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = config
SCREAMING_SNAKE_CASE = RegNetEmbeddings(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = RegNetEncoder(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCamelCase__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=lowerCamelCase__ ,config_class=_CONFIG_FOR_DOC ,modality="""vision""" ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ,lowerCamelCase__ : Tensor ,lowerCamelCase__ : Optional[bool] = None ,lowerCamelCase__ : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
'''simple docstring'''
SCREAMING_SNAKE_CASE = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE = self.embedder(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.encoder(
lowerCamelCase__ ,output_hidden_states=lowerCamelCase__ ,return_dict=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = encoder_outputs[0]
SCREAMING_SNAKE_CASE = self.pooler(lowerCamelCase__ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCamelCase__ ,pooler_output=lowerCamelCase__ ,hidden_states=encoder_outputs.hidden_states ,)
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , lowerCAmelCase_ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self : Any ,lowerCamelCase__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
super().__init__(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = config.num_labels
SCREAMING_SNAKE_CASE = RegNetModel(lowerCamelCase__ )
# classification head
SCREAMING_SNAKE_CASE = nn.Sequential(
nn.Flatten() ,nn.Linear(config.hidden_sizes[-1] ,config.num_labels ) if config.num_labels > 0 else nn.Identity() ,)
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCamelCase__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=lowerCamelCase__ ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def SCREAMING_SNAKE_CASE__ ( self : str ,lowerCamelCase__ : Optional[torch.FloatTensor] = None ,lowerCamelCase__ : Optional[torch.LongTensor] = None ,lowerCamelCase__ : Optional[bool] = None ,lowerCamelCase__ : Optional[bool] = None ,) -> ImageClassifierOutputWithNoAttention:
'''simple docstring'''
SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE = self.regnet(lowerCamelCase__ ,output_hidden_states=lowerCamelCase__ ,return_dict=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = outputs.pooler_output if return_dict else outputs[1]
SCREAMING_SNAKE_CASE = self.classifier(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
SCREAMING_SNAKE_CASE = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
SCREAMING_SNAKE_CASE = """single_label_classification"""
else:
SCREAMING_SNAKE_CASE = """multi_label_classification"""
if self.config.problem_type == "regression":
SCREAMING_SNAKE_CASE = MSELoss()
if self.num_labels == 1:
SCREAMING_SNAKE_CASE = loss_fct(logits.squeeze() ,labels.squeeze() )
else:
SCREAMING_SNAKE_CASE = loss_fct(lowerCamelCase__ ,lowerCamelCase__ )
elif self.config.problem_type == "single_label_classification":
SCREAMING_SNAKE_CASE = CrossEntropyLoss()
SCREAMING_SNAKE_CASE = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
SCREAMING_SNAKE_CASE = BCEWithLogitsLoss()
SCREAMING_SNAKE_CASE = loss_fct(lowerCamelCase__ ,lowerCamelCase__ )
if not return_dict:
SCREAMING_SNAKE_CASE = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowerCamelCase__ ,logits=lowerCamelCase__ ,hidden_states=outputs.hidden_states )
| 193 | 0 |
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
lowerCamelCase__ = re.compile(R"""\b(a|an|the)\b""", re.UNICODE)
lowerCamelCase__ = None
def __lowerCAmelCase ():
__lowerCAmelCase : Dict = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.' )
parser.add_argument('data_file' , metavar='data.json' , help='Input data JSON file.' )
parser.add_argument('pred_file' , metavar='pred.json' , help='Model predictions.' )
parser.add_argument(
'--out-file' , '-o' , metavar='eval.json' , help='Write accuracy metrics to file (default is stdout).' )
parser.add_argument(
'--na-prob-file' , '-n' , metavar='na_prob.json' , help='Model estimates of probability of no answer.' )
parser.add_argument(
'--na-prob-thresh' , '-t' , type=_a , default=1.0 , help='Predict "" if no-answer probability exceeds this (default = 1.0).' , )
parser.add_argument(
'--out-image-dir' , '-p' , metavar='out_images' , default=_a , help='Save precision-recall curves to directory.' )
parser.add_argument('--verbose' , '-v' , action='store_true' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : Union[str, Any] = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__lowerCAmelCase : Any = bool(qa['answers']['text'] )
return qid_to_has_ans
def __lowerCAmelCase (_UpperCamelCase ):
def remove_articles(_UpperCamelCase ):
return ARTICLES_REGEX.sub(' ' , _a )
def white_space_fix(_UpperCamelCase ):
return " ".join(text.split() )
def remove_punc(_UpperCamelCase ):
__lowerCAmelCase : List[Any] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_UpperCamelCase ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_a ) ) ) )
def __lowerCAmelCase (_UpperCamelCase ):
if not s:
return []
return normalize_answer(_a ).split()
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
return int(normalize_answer(_a ) == normalize_answer(_a ) )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : List[Any] = get_tokens(_a )
__lowerCAmelCase : Any = get_tokens(_a )
__lowerCAmelCase : Union[str, Any] = collections.Counter(_a ) & collections.Counter(_a )
__lowerCAmelCase : Union[str, Any] = sum(common.values() )
if len(_a ) == 0 or len(_a ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
__lowerCAmelCase : Optional[int] = 1.0 * num_same / len(_a )
__lowerCAmelCase : str = 1.0 * num_same / len(_a )
__lowerCAmelCase : str = (2 * precision * recall) / (precision + recall)
return fa
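# Worked example (illustrative) of the token-level F1 computed above:
#   gold = "the cat sat on the mat" -> normalized tokens [cat, sat, on, mat]
#   pred = "cat sat"                -> normalized tokens [cat, sat]
#   overlap = 2, precision = 2/2 = 1.0, recall = 2/4 = 0.5
#   f1 = 2 * 1.0 * 0.5 / (1.0 + 0.5) = 2/3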
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : int = {}
__lowerCAmelCase : Optional[int] = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__lowerCAmelCase : str = qa['id']
__lowerCAmelCase : str = [t for t in qa['answers']['text'] if normalize_answer(_a )]
if not gold_answers:
# For unanswerable questions, the only correct answer is the empty string
__lowerCAmelCase : Optional[int] = ['']
if qid not in preds:
print(F"Missing prediction for {qid}" )
continue
__lowerCAmelCase : int = preds[qid]
# Take max over all gold answers
__lowerCAmelCase : Union[str, Any] = max(compute_exact(_a , _a ) for a in gold_answers )
__lowerCAmelCase : Optional[Any] = max(compute_fa(_a , _a ) for a in gold_answers )
return exact_scores, fa_scores
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Dict = {}
for qid, s in scores.items():
__lowerCAmelCase : Dict = na_probs[qid] > na_prob_thresh
if pred_na:
__lowerCAmelCase : List[str] = float(not qid_to_has_ans[qid] )
else:
__lowerCAmelCase : int = s
return new_scores
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ):
if not qid_list:
__lowerCAmelCase : Union[str, Any] = len(_a )
return collections.OrderedDict(
[
('exact', 100.0 * sum(exact_scores.values() ) / total),
('f1', 100.0 * sum(fa_scores.values() ) / total),
('total', total),
] )
else:
__lowerCAmelCase : str = len(_a )
return collections.OrderedDict(
[
('exact', 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
('f1', 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
('total', total),
] )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
for k in new_eval:
__lowerCAmelCase : str = new_eval[k]
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
plt.step(_a , _a , color='b' , alpha=0.2 , where='post' )
plt.fill_between(_a , _a , step='post' , alpha=0.2 , color='b' )
plt.xlabel('Recall' )
plt.ylabel('Precision' )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(_a )
plt.savefig(_a )
plt.clf()
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None ):
__lowerCAmelCase : str = sorted(_a , key=lambda _UpperCamelCase : na_probs[k] )
__lowerCAmelCase : Optional[Any] = 0.0
__lowerCAmelCase : Tuple = 1.0
__lowerCAmelCase : Any = 0.0
__lowerCAmelCase : Union[str, Any] = [1.0]
__lowerCAmelCase : Any = [0.0]
__lowerCAmelCase : Tuple = 0.0
for i, qid in enumerate(_a ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
__lowerCAmelCase : Optional[Any] = true_pos / float(i + 1 )
__lowerCAmelCase : Optional[int] = true_pos / float(_a )
if i == len(_a ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(_a )
recalls.append(_a )
if out_image:
plot_pr_curve(_a , _a , _a , _a )
return {"ap": 100.0 * avg_prec}
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if out_image_dir and not os.path.exists(_a ):
os.makedirs(_a )
__lowerCAmelCase : Optional[Any] = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
__lowerCAmelCase : int = make_precision_recall_eval(
_a , _a , _a , _a , out_image=os.path.join(_a , 'pr_exact.png' ) , title='Precision-Recall curve for Exact Match score' , )
__lowerCAmelCase : List[Any] = make_precision_recall_eval(
_a , _a , _a , _a , out_image=os.path.join(_a , 'pr_f1.png' ) , title='Precision-Recall curve for F1 score' , )
__lowerCAmelCase : Union[str, Any] = {k: float(_a ) for k, v in qid_to_has_ans.items()}
__lowerCAmelCase : Optional[int] = make_precision_recall_eval(
_a , _a , _a , _a , out_image=os.path.join(_a , 'pr_oracle.png' ) , title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)' , )
merge_eval(_a , _a , 'pr_exact' )
merge_eval(_a , _a , 'pr_f1' )
merge_eval(_a , _a , 'pr_oracle' )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if not qid_list:
return
__lowerCAmelCase : Tuple = [na_probs[k] for k in qid_list]
__lowerCAmelCase : Optional[int] = np.ones_like(_a ) / float(len(_a ) )
plt.hist(_a , weights=_a , bins=20 , range=(0.0, 1.0) )
plt.xlabel('Model probability of no-answer' )
plt.ylabel('Proportion of dataset' )
plt.title(F"Histogram of no-answer probability: {name}" )
plt.savefig(os.path.join(_a , F"na_prob_hist_{name}.png" ) )
plt.clf()
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : List[Any] = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
__lowerCAmelCase : List[Any] = num_no_ans
__lowerCAmelCase : Any = cur_score
__lowerCAmelCase : Any = 0.0
__lowerCAmelCase : Dict = sorted(_a , key=lambda _UpperCamelCase : na_probs[k] )
for i, qid in enumerate(_a ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
__lowerCAmelCase : Union[str, Any] = scores[qid]
else:
if preds[qid]:
__lowerCAmelCase : int = -1
else:
__lowerCAmelCase : Optional[int] = 0
cur_score += diff
if cur_score > best_score:
__lowerCAmelCase : List[Any] = cur_score
__lowerCAmelCase : Any = na_probs[qid]
return 100.0 * best_score / len(_a ), best_thresh
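# Worked sketch (illustrative) of the threshold search above: the running score
# starts at the count of unanswerable questions (the score of abstaining on every
# question). Walking qids by increasing no-answer probability, an answerable qid
# adds its EM/F1 score, a wrongly answered unanswerable qid subtracts 1, and the
# returned threshold is the no-answer probability at which the running score peaked.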
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase , __lowerCAmelCase : List[Any] = find_best_thresh(_a , _a , _a , _a )
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = find_best_thresh(_a , _a , _a , _a )
__lowerCAmelCase : List[Any] = best_exact
__lowerCAmelCase : str = exact_thresh
__lowerCAmelCase : Any = best_fa
__lowerCAmelCase : Union[str, Any] = fa_thresh
def __lowerCAmelCase ():
with open(OPTS.data_file ) as f:
__lowerCAmelCase : Any = json.load(_a )
__lowerCAmelCase : str = dataset_json['data']
with open(OPTS.pred_file ) as f:
__lowerCAmelCase : Union[str, Any] = json.load(_a )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
__lowerCAmelCase : Union[str, Any] = json.load(_a )
else:
__lowerCAmelCase : Tuple = {k: 0.0 for k in preds}
__lowerCAmelCase : Optional[Any] = make_qid_to_has_ans(_a ) # maps qid to True/False
__lowerCAmelCase : Any = [k for k, v in qid_to_has_ans.items() if v]
__lowerCAmelCase : int = [k for k, v in qid_to_has_ans.items() if not v]
__lowerCAmelCase , __lowerCAmelCase : Optional[Any] = get_raw_scores(_a , _a )
__lowerCAmelCase : Union[str, Any] = apply_no_ans_threshold(_a , _a , _a , OPTS.na_prob_thresh )
__lowerCAmelCase : str = apply_no_ans_threshold(_a , _a , _a , OPTS.na_prob_thresh )
__lowerCAmelCase : Tuple = make_eval_dict(_a , _a )
if has_ans_qids:
__lowerCAmelCase : Dict = make_eval_dict(_a , _a , qid_list=_a )
merge_eval(_a , _a , 'HasAns' )
if no_ans_qids:
__lowerCAmelCase : List[str] = make_eval_dict(_a , _a , qid_list=_a )
merge_eval(_a , _a , 'NoAns' )
if OPTS.na_prob_file:
find_all_best_thresh(_a , _a , _a , _a , _a , _a )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(_a , _a , _a , _a , _a , OPTS.out_image_dir )
histogram_na_prob(_a , _a , OPTS.out_image_dir , 'hasAns' )
histogram_na_prob(_a , _a , OPTS.out_image_dir , 'noAns' )
if OPTS.out_file:
with open(OPTS.out_file , 'w' ) as f:
json.dump(_a , _a )
else:
print(json.dumps(_a , indent=2 ) )
if __name__ == "__main__":
lowerCamelCase__ = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("""Agg""")
import matplotlib.pyplot as plt
main() | 86 |
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
A =[
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
A =logging.getLogger()
def snake_case_ ():
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''-f''' )
UpperCAmelCase = parser.parse_args()
return args.f
def snake_case_ (_a : List[str] , _a : Union[str, Any]="eval" ):
UpperCAmelCase = os.path.join(_a , F"{split}_results.json" )
if os.path.exists(_a ):
with open(_a , '''r''' ) as f:
return json.load(_a )
raise ValueError(F"can't find {path}" )
A =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _a ( __a ):
def A ( self : Any ):
'''simple docstring'''
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()
with patch.object(lowercase , '''argv''' , lowercase ):
run_flax_glue.main()
UpperCAmelCase = get_results(lowercase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def A ( self : Any ):
'''simple docstring'''
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(lowercase , '''argv''' , lowercase ):
run_clm_flax.main()
UpperCAmelCase = get_results(lowercase )
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
def A ( self : str ):
'''simple docstring'''
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = f"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split()
with patch.object(lowercase , '''argv''' , lowercase ):
run_summarization_flax.main()
UpperCAmelCase = get_results(lowercase , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def A ( self : int ):
'''simple docstring'''
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = f"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split()
with patch.object(lowercase , '''argv''' , lowercase ):
run_mlm_flax.main()
UpperCAmelCase = get_results(lowercase )
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
def A ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = f"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(lowercase , '''argv''' , lowercase ):
run_ta_mlm_flax.main()
UpperCAmelCase = get_results(lowercase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def A ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase = 7 if get_gpu_count() > 1 else 2
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = f"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split()
with patch.object(lowercase , '''argv''' , lowercase ):
run_flax_ner.main()
UpperCAmelCase = get_results(lowercase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def A ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = f"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split()
with patch.object(lowercase , '''argv''' , lowercase ):
run_qa.main()
UpperCAmelCase = get_results(lowercase )
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
| 34 | 0 |
"""simple docstring"""
def lowerCamelCase_ ( UpperCamelCase__ : int , UpperCamelCase__ : int ) -> str:
"""simple docstring"""
if number < 0 or shift_amount < 0:
raise ValueError('both inputs must be non-negative integers' )
__lowerCamelCase = str(bin(UpperCamelCase__ ) )
binary_number += "0" * shift_amount
return binary_number
def lowerCamelCase_ ( UpperCamelCase__ : int , UpperCamelCase__ : int ) -> str:
"""simple docstring"""
if number < 0 or shift_amount < 0:
raise ValueError('both inputs must be non-negative integers' )
__lowerCamelCase = str(bin(UpperCamelCase__ ) )[2:]
if shift_amount >= len(UpperCamelCase__ ):
return "0b0"
__lowerCamelCase = binary_number[: len(UpperCamelCase__ ) - shift_amount]
return "0b" + shifted_binary_number
def lowerCamelCase_ ( UpperCamelCase__ : int , UpperCamelCase__ : int ) -> str:
"""simple docstring"""
if number >= 0: # Get binary representation of positive number
__lowerCamelCase = '0' + str(bin(UpperCamelCase__ ) ).strip('-' )[2:]
else: # Get binary (2's complement) representation of negative number
__lowerCamelCase = len(bin(UpperCamelCase__ )[3:] ) # Find 2's complement of number
__lowerCamelCase = bin(abs(UpperCamelCase__ ) - (1 << binary_number_length) )[3:]
__lowerCamelCase = (
'1' + '0' * (binary_number_length - len(UpperCamelCase__ )) + binary_number
)
if shift_amount >= len(UpperCamelCase__ ):
return "0b" + binary_number[0] * len(UpperCamelCase__ )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(UpperCamelCase__ ) - shift_amount]
)
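# Illustrative trace of the arithmetic shift above for a negative input:
#   number=-4, shift_amount=1 -> two's complement "1100"; the sign bit is replicated
#   while shifting -> "0b" + "1" + "110" = "0b1110", i.e. -2 in 4-bit two's complement.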
if __name__ == "__main__":
import doctest
doctest.testmod()
| 361 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = ["model.decoder.embed_positions.weights"]
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> List[Any]:
"""simple docstring"""
if "emb" in name:
__lowerCamelCase = name.replace('emb' , 'model.decoder.embed_tokens' )
if "transformer" in name:
__lowerCamelCase = name.replace('transformer' , 'model.decoder' )
if "cross_attention" in name:
__lowerCamelCase = name.replace('cross_attention' , 'encoder_attn' )
if "linear1" in name:
__lowerCamelCase = name.replace('linear1' , 'fc1' )
if "linear2" in name:
__lowerCamelCase = name.replace('linear2' , 'fc2' )
if "norm1" in name:
__lowerCamelCase = name.replace('norm1' , 'self_attn_layer_norm' )
if "norm_cross" in name:
__lowerCamelCase = name.replace('norm_cross' , 'encoder_attn_layer_norm' )
if "norm2" in name:
__lowerCamelCase = name.replace('norm2' , 'final_layer_norm' )
if "out_norm" in name:
__lowerCamelCase = name.replace('out_norm' , 'model.decoder.layer_norm' )
if "linears" in name:
__lowerCamelCase = name.replace('linears' , 'lm_heads' )
if "condition_provider.conditioners.description.output_proj" in name:
__lowerCamelCase = name.replace('condition_provider.conditioners.description.output_proj' , 'enc_to_dec_proj' )
return name
def lowerCamelCase_ ( UpperCamelCase__ : OrderedDict , UpperCamelCase__ : int ) -> Tuple[Dict, Dict]:
"""simple docstring"""
__lowerCamelCase = list(state_dict.keys() )
__lowerCamelCase = {}
for key in keys:
__lowerCamelCase = state_dict.pop(UpperCamelCase__ )
__lowerCamelCase = rename_keys(UpperCamelCase__ )
if "in_proj_weight" in key:
# split fused qkv proj
__lowerCamelCase = val[:hidden_size, :]
__lowerCamelCase = val[hidden_size : 2 * hidden_size, :]
__lowerCamelCase = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
__lowerCamelCase = val
else:
__lowerCamelCase = val
return state_dict, enc_dec_proj_state_dict
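# Minimal sketch (illustrative) of the fused-qkv split performed above: an in_proj
# weight of shape (3 * hidden, hidden) is cut into equal q/k/v blocks along dim 0.
import torch

hidden = 4
in_proj_weight = torch.randn(3 * hidden, hidden)
q = in_proj_weight[:hidden, :]
k = in_proj_weight[hidden : 2 * hidden, :]
v = in_proj_weight[-hidden:, :]
assert q.shape == k.shape == v.shape == (hidden, hidden)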
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> MusicgenDecoderConfig:
"""simple docstring"""
if checkpoint == "small":
# default config values
__lowerCamelCase = 1024
__lowerCamelCase = 24
__lowerCamelCase = 16
elif checkpoint == "medium":
__lowerCamelCase = 1536
__lowerCamelCase = 48
__lowerCamelCase = 24
elif checkpoint == "large":
__lowerCamelCase = 2048
__lowerCamelCase = 48
__lowerCamelCase = 32
else:
raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
__lowerCamelCase = MusicgenDecoderConfig(
hidden_size=UpperCamelCase__ , ffn_dim=hidden_size * 4 , num_hidden_layers=UpperCamelCase__ , num_attention_heads=UpperCamelCase__ , )
return config
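# Illustrative summary of the branches above: small -> (hidden 1024, 24 layers,
# 16 heads), medium -> (1536, 48, 24), large -> (2048, 48, 32); ffn_dim = 4 * hidden.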
@torch.no_grad()
def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Optional[int]="cpu" ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = MusicGen.get_pretrained(UpperCamelCase__ , device=UpperCamelCase__ )
__lowerCamelCase = decoder_config_from_checkpoint(UpperCamelCase__ )
__lowerCamelCase = fairseq_model.lm.state_dict()
__lowerCamelCase , __lowerCamelCase = rename_state_dict(
UpperCamelCase__ , hidden_size=decoder_config.hidden_size )
__lowerCamelCase = TaEncoderModel.from_pretrained('t5-base' )
__lowerCamelCase = EncodecModel.from_pretrained('facebook/encodec_32khz' )
__lowerCamelCase = MusicgenForCausalLM(UpperCamelCase__ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
__lowerCamelCase , __lowerCamelCase = decoder.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
for key in missing_keys.copy():
if key.startswith(('text_encoder', 'audio_encoder') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" )
if len(UpperCamelCase__ ) > 0:
raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
__lowerCamelCase = MusicgenForConditionalGeneration(text_encoder=UpperCamelCase__ , audio_encoder=UpperCamelCase__ , decoder=UpperCamelCase__ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(UpperCamelCase__ )
# check we can do a forward pass
__lowerCamelCase = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
__lowerCamelCase = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
__lowerCamelCase = model(input_ids=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ ).logits
if logits.shape != (8, 1, 2048):
raise ValueError('Incorrect shape for logits' )
# now construct the processor
__lowerCamelCase = AutoTokenizer.from_pretrained('t5-base' )
__lowerCamelCase = AutoFeatureExtractor.from_pretrained('facebook/encodec_32khz' , padding_side='left' )
__lowerCamelCase = MusicgenProcessor(feature_extractor=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
# set the appropriate bos/pad token ids
__lowerCamelCase = 2048
__lowerCamelCase = 2048
# set other default generation config params
__lowerCamelCase = int(30 * audio_encoder.config.frame_rate )
__lowerCamelCase = True
__lowerCamelCase = 3.0
if pytorch_dump_folder is not None:
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
if repo_id:
logger.info(F"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(UpperCamelCase__ )
processor.push_to_hub(UpperCamelCase__ )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
__A = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 348 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
a__ : Tuple = {
"configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : int = ["MobileViTFeatureExtractor"]
a__ : List[str] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Any = [
"MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileViTForImageClassification",
"MobileViTForSemanticSegmentation",
"MobileViTModel",
"MobileViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : str = [
"TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileViTForImageClassification",
"TFMobileViTForSemanticSegmentation",
"TFMobileViTModel",
"TFMobileViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
a__ : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 313 |
from __future__ import annotations
_snake_case : Any = "Muhammad Umer Farooq"
_snake_case : Optional[int] = "MIT"
_snake_case : Union[str, Any] = "1.0.0"
_snake_case : Optional[Any] = "Muhammad Umer Farooq"
_snake_case : List[Any] = "contact@muhammadumerfarooq.me"
_snake_case : Dict = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class a (_lowerCAmelCase ):
"""simple docstring"""
def __init__( self : Tuple , lowerCamelCase : str ) -> None:
super().__init__()
__snake_case : list[str] = []
__snake_case : Any = domain
def __snake_case ( self : List[str] , lowerCamelCase : str , lowerCamelCase : list[tuple[str, str | None]] ) -> None:
# Only parse the 'anchor' tag.
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined and is neither empty nor just '#', record it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
__snake_case : Any = parse.urljoin(self.domain , lowerCamelCase )
self.urls.append(lowerCamelCase )
def lowerCAmelCase_ ( __lowerCamelCase ):
return ".".join(get_sub_domain_name(__lowerCamelCase ).split("." )[-2:] )
def lowerCAmelCase_ ( __lowerCamelCase ):
return parse.urlparse(__lowerCamelCase ).netloc
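# Illustrative trace of the two helpers above (assuming they correspond to the
# original get_domain_name / get_sub_domain_name pair):
#   parse.urlparse("https://docs.github.com/en").netloc -> "docs.github.com"
#   ".".join("docs.github.com".split(".")[-2:])          -> "github.com"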
def lowerCAmelCase_ ( __lowerCamelCase = "https://github.com" ):
__snake_case : Tuple = get_domain_name(__lowerCamelCase )
# Initialize the parser
__snake_case : Dict = Parser(__lowerCamelCase )
try:
# Open URL
__snake_case : Any = requests.get(__lowerCamelCase )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
__snake_case : List[str] = set()
for link in parser.urls:
# Open each URL found on the page.
try:
__snake_case : List[str] = requests.get(__lowerCamelCase )
# Extract the valid email addresses.
__snake_case : Any = re.findall("[a-zA-Z0-9]+@" + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(__lowerCamelCase )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(__lowerCamelCase )
if __name__ == "__main__":
_snake_case : Union[str, Any] = emails_from_url("https://github.com")
print(f'''{len(emails)} emails found:''')
print("\n".join(sorted(emails)))
| 123 | 0 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
lowerCamelCase_ = """pytorch_model.bin"""
lowerCamelCase_ = """pytorch_model.bin.index.json"""
lowerCamelCase_ = """adapter_config.json"""
lowerCamelCase_ = """adapter_model.bin"""
lowerCamelCase_ = """adapter_model.safetensors"""
lowerCamelCase_ = """tf_model.h5"""
lowerCamelCase_ = """tf_model.h5.index.json"""
lowerCamelCase_ = """model.ckpt"""
lowerCamelCase_ = """flax_model.msgpack"""
lowerCamelCase_ = """flax_model.msgpack.index.json"""
lowerCamelCase_ = """model.safetensors"""
lowerCamelCase_ = """model.safetensors.index.json"""
lowerCamelCase_ = """config.json"""
lowerCamelCase_ = """preprocessor_config.json"""
lowerCamelCase_ = FEATURE_EXTRACTOR_NAME
lowerCamelCase_ = """generation_config.json"""
lowerCamelCase_ = """modelcard.json"""
lowerCamelCase_ = """▁"""
lowerCamelCase_ = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
lowerCamelCase_ = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
lowerCamelCase_ = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
lowerCamelCase_ = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def lowerCamelCase ( a_ ) -> Dict:
if version.parse(a_ ) < version.parse(a_ ):
if "dev" in min_version:
lowerCAmelCase_ = (
'This example requires a source install from HuggingFace Transformers (see '
'`https://huggingface.co/docs/transformers/installation#install-from-source`),'
)
else:
lowerCAmelCase_ = F'''This example requires a minimum version of {min_version},'''
error_message += F''' but the version found is {__version__}.\n'''
raise ImportError(
error_message
+ 'Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '
'versions of HuggingFace Transformers.' )
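# Illustrative usage sketch of the version gate above, assuming standard `packaging`
# semantics: a dev pre-release of a later version still compares greater.
from packaging import version

assert version.parse("4.22.0.dev0") > version.parse("4.21.0")
assert version.parse("4.22.0.dev0") < version.parse("4.22.0")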
| 14 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
lowerCamelCase_ = logging.get_logger(__name__)
@add_end_docstrings(a_ )
class a_ ( a_ ):
'''simple docstring'''
def __init__( self , *lowercase_ , **lowercase_ ) -> Any:
'''simple docstring'''
super().__init__(*lowercase_ , **lowercase_ )
self.check_model_type(lowercase_ )
def _lowercase ( self , lowercase_=None , lowercase_=None , lowercase_=None , **lowercase_ ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ = {}, {}
if padding is not None:
lowerCAmelCase_ = padding
if truncation is not None:
lowerCAmelCase_ = truncation
if top_k is not None:
lowerCAmelCase_ = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , lowercase_ , lowercase_ = None , **lowercase_ ) -> int:
'''simple docstring'''
if isinstance(lowercase_ , (Image.Image, str) ) and isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase_ = {'image': image, 'question': question}
else:
lowerCAmelCase_ = image
lowerCAmelCase_ = super().__call__(lowercase_ , **lowercase_ )
return results
def _lowercase ( self , lowercase_ , lowercase_=False , lowercase_=False ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ = load_image(inputs['image'] )
lowerCAmelCase_ = self.tokenizer(
inputs['question'] , return_tensors=self.framework , padding=lowercase_ , truncation=lowercase_ )
lowerCAmelCase_ = self.image_processor(images=lowercase_ , return_tensors=self.framework )
model_inputs.update(lowercase_ )
return model_inputs
def _lowercase ( self , lowercase_ ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ = self.model(**lowercase_ )
return model_outputs
def _lowercase ( self , lowercase_ , lowercase_=5 ) -> Any:
'''simple docstring'''
if top_k > self.model.config.num_labels:
lowerCAmelCase_ = self.model.config.num_labels
if self.framework == "pt":
lowerCAmelCase_ = model_outputs.logits.sigmoid()[0]
lowerCAmelCase_ , lowerCAmelCase_ = probs.topk(lowercase_ )
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
lowerCAmelCase_ = scores.tolist()
lowerCAmelCase_ = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(lowercase_ , lowercase_ )]
| 14 | 1 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
UpperCamelCase_ = random.Random()
def _UpperCAmelCase ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any]=1.0 , _lowerCamelCase : Tuple=None , _lowerCamelCase : List[Any]=None ) -> Union[str, Any]:
if rng is None:
_lowerCAmelCase : str = global_rng
_lowerCAmelCase : Optional[int] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class a_ (unittest.TestCase ):
def __init__( self , snake_case_ , snake_case_=7 , snake_case_=4_0_0 , snake_case_=2_0_0_0 , snake_case_=2_4 , snake_case_=2_4 , snake_case_=0.0 , snake_case_=1_6_0_0_0 , snake_case_=True , snake_case_=True , ):
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : Optional[int] = batch_size
_lowerCAmelCase : Any = min_seq_length
_lowerCAmelCase : List[str] = max_seq_length
_lowerCAmelCase : Optional[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_lowerCAmelCase : Tuple = feature_size
_lowerCAmelCase : List[str] = num_mel_bins
_lowerCAmelCase : Tuple = padding_value
_lowerCAmelCase : int = sampling_rate
_lowerCAmelCase : Optional[Any] = return_attention_mask
_lowerCAmelCase : Tuple = do_normalize
def __UpperCamelCase ( self ):
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __UpperCamelCase ( self , snake_case_=False , snake_case_=False ):
def _flatten(snake_case_ ):
return list(itertools.chain(*snake_case_ ) )
if equal_length:
_lowerCAmelCase : Optional[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_lowerCAmelCase : Tuple = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_lowerCAmelCase : Any = [np.asarray(snake_case_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class a_ (A__ , unittest.TestCase ):
__lowerCAmelCase : List[str] = SpeechaTextFeatureExtractor if is_speech_available() else None
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = SpeechaTextFeatureExtractionTester(self )
def __UpperCamelCase ( self , snake_case_ ):
self.assertTrue(np.all(np.mean(snake_case_ , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(snake_case_ , axis=0 ) - 1 ) < 1E-3 ) )
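# Illustrative note: for features x of shape (time, dim), per-dimension
# normalization x_norm = (x - x.mean(0)) / sqrt(x.var(0)) is what makes the
# two assertions above hold, up to floating-point error.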
def __UpperCamelCase ( self ):
# Test that all calls wrap to encode_plus and batch_encode_plus
_lowerCAmelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowerCAmelCase : List[Any] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_lowerCAmelCase : str = [np.asarray(snake_case_ ) for speech_input in speech_inputs]
# Test feature size
_lowerCAmelCase : Optional[Any] = feature_extractor(snake_case_ , padding=snake_case_ , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
_lowerCAmelCase : Any = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
_lowerCAmelCase : int = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1E-3 ) )
# Test batched
_lowerCAmelCase : int = feature_extractor(snake_case_ , return_tensors="""np""" ).input_features
_lowerCAmelCase : List[str] = feature_extractor(snake_case_ , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(snake_case_ , snake_case_ ):
self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
_lowerCAmelCase : Tuple = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_lowerCAmelCase : int = np.asarray(snake_case_ )
_lowerCAmelCase : int = feature_extractor(snake_case_ , return_tensors="""np""" ).input_features
_lowerCAmelCase : int = feature_extractor(snake_case_ , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(snake_case_ , snake_case_ ):
self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1E-3 ) )
    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            # check the padded region of the *second* example against its own valid length
            self.assertTrue(input_features[1][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="max_length",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])
    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length < longest -> then truncate to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=16,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length > longest -> then pad to longest
        self.assertEqual(input_features.shape, (3, 6, 24))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
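# The zero-mean / unit-variance assertions above exercise utterance-level
# cepstral mean and variance normalization (CMVN). A minimal numpy sketch of
# that normalization, assuming per-utterance statistics (hypothetical helper,
# not part of the original test file):
def utterance_cmvn(features, epsilon=1e-10):
    # Normalize each feature dimension over the time axis (axis 0) to zero
    # mean and unit variance, which is what _check_zero_mean_unit_variance
    # verifies on the extractor's output.
    mean = features.mean(axis=0)
    std = features.std(axis=0)
    return (features - mean) / (std + epsilon)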
| 309 |
"""simple docstring"""
def solution(limit: int = 1000000) -> int:
    """Return the starting number below `limit` that produces the longest Collatz chain."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, limit):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
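# Worked example of the rule used above: the Collatz chain starting at 13 is
# 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1, i.e. 10 terms, so the
# memoised chain length stored for 13 is 10. A standalone sketch:
def collatz_chain(n):
    chain = [n]
    while n != 1:
        n = n // 2 if n % 2 == 0 else 3 * n + 1
        chain.append(n)
    return chain


assert len(collatz_chain(13)) == 10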
| 301 | 0 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
__A = get_tests_dir("""fixtures""")
__A = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
__A = get_tests_dir("""fixtures/dummy-config.json""")
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ :List[str] = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
lowerCAmelCase__ :Dict = AutoFeatureExtractor.from_pretrained(__UpperCAmelCase ).to_dict()
config_dict.pop('feature_extractor_type' )
lowerCAmelCase__ :Optional[Any] = WavaVecaFeatureExtractor(**__UpperCAmelCase )
# save in new folder
model_config.save_pretrained(__UpperCAmelCase )
config.save_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :str = AutoFeatureExtractor.from_pretrained(__UpperCAmelCase )
# make sure private variable is not incorrectly saved
lowerCAmelCase__ :Optional[Any] = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = AutoFeatureExtractor.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCAmelCase , 'bert-base is not a local folder and is not a valid model identifier' ):
lowerCAmelCase__ :int = AutoFeatureExtractor.from_pretrained('bert-base' )
def snake_case ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCAmelCase , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
lowerCAmelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(__UpperCAmelCase , revision='aaaaaa' )
def snake_case ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCAmelCase , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
lowerCAmelCase__ :Optional[int] = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def snake_case ( self ):
'''simple docstring'''
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ :Union[str, Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=__UpperCAmelCase )
lowerCAmelCase__ :Any = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=__UpperCAmelCase )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :str = AutoFeatureExtractor.from_pretrained(__UpperCAmelCase , trust_remote_code=__UpperCAmelCase )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
def snake_case ( self ):
'''simple docstring'''
try:
AutoConfig.register('custom' , __UpperCAmelCase )
AutoFeatureExtractor.register(__UpperCAmelCase , __UpperCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__UpperCAmelCase ):
AutoFeatureExtractor.register(__UpperCAmelCase , __UpperCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCAmelCase__ :List[Any] = CustomFeatureExtractor.from_pretrained(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :List[str] = AutoFeatureExtractor.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def snake_case ( self ):
'''simple docstring'''
        class NewFeatureExtractor(CustomFeatureExtractor):
            is_local = True
try:
AutoConfig.register('custom' , __UpperCAmelCase )
AutoFeatureExtractor.register(__UpperCAmelCase , __UpperCAmelCase )
# If remote code is not set, the default is to use local
lowerCAmelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
lowerCAmelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=__UpperCAmelCase )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
lowerCAmelCase__ :Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=__UpperCAmelCase )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(not hasattr(__UpperCAmelCase , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
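# A minimal sketch of the register -> save -> reload round trip exercised
# above, assuming the CustomConfig / CustomFeatureExtractor test fixtures
# imported at the top of this file (hypothetical standalone usage):
def _roundtrip_custom_feature_extractor():
    import tempfile

    AutoConfig.register("custom", CustomConfig)
    AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
    feature_extractor = CustomFeatureExtractor()
    with tempfile.TemporaryDirectory() as tmp_dir:
        feature_extractor.save_pretrained(tmp_dir)
        return AutoFeatureExtractor.from_pretrained(tmp_dir)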
| 357 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__A = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
__A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
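# The _LazyModule above defers importing the heavy processing submodule until
# an attribute is first accessed, so top-level package import stays cheap; a
# static type checker still sees the real import via the TYPE_CHECKING branch.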
| 254 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    r"""
    Build the tree      1
                       / \
                      2   3
                     / \
                    4   5
    """
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0
def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)

        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output
def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output
def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """ZigZag traversal: visit levels left-to-right and right-to-left, alternately."""
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []

    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0

    return output
def main() -> None:  # Main function for testing.
    tree = make_tree()

    print(f"In-order Traversal: {inorder(tree)}")
    print(f"Pre-order Traversal: {preorder(tree)}")
    print(f"Post-order Traversal: {postorder(tree)}", "\n")

    print(f"Height of Tree: {height(tree)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(tree), "\n")

    print("Level-wise order Traversal: ")
    for level in range(1, height(tree) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(tree, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(tree))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
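# Expected traversals for the tree built by make_tree() above:
#   preorder  -> [1, 2, 4, 5, 3]
#   inorder   -> [4, 2, 5, 1, 3]
#   postorder -> [4, 5, 2, 3, 1]
#   zigzag    -> [[1], [3, 2], [4, 5]]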
| 245 |
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate the entropy of softmax(x) for a pre-softmax logit Tensor."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
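# entropy(x) above computes, per row, the Shannon entropy of softmax(x):
#   H = log(sum_i exp(x_i)) - (sum_i x_i * exp(x_i)) / (sum_i exp(x_i))
# A readable (if less compact) equivalent, offered only as a reference sketch:
def entropy_reference(x):
    probs = torch.nn.functional.softmax(x, dim=1)
    return -(probs * probs.clamp_min(1e-12).log()).sum(dim=1)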
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]
    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x
    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)

        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()
    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)
    def get_input_embeddings(self):
        return self.embeddings.word_embeddings
    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value
    def _prune_heads(self, heads_to_prune):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
    """A module to provide an early-exit "highway" from one non-final BertLayer to the classification head."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
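# Sketch of how the entropy-based early exit above is typically driven at
# inference time (hypothetical usage, not part of this module):
#   model.bert.encoder.set_early_exit_entropy(0.1)  # exit once entropy < 0.1
#   outputs = model(input_ids=batch)  # HighwayException is caught internally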
| 245 | 1 |
"""simple docstring"""
from math import log2


def snake_case_(A_: int) -> int:
    """Return the zero-based index of the lowest set bit of A_ (0 when A_ == 0)."""
    if not isinstance(A_, int):
        raise TypeError("Input value must be an 'int' type")
    if A_ < 0:
        raise ValueError("Input value must be a positive integer")
    return 0 if (A_ == 0) else int(log2(A_ & -A_))
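# Worked example of the two's-complement trick above: for A_ = 12 (0b1100),
# -A_ ends in ...0100, so A_ & -A_ == 0b100 == 4 and log2(4) == 2, the
# zero-based index of the lowest set bit.
assert 12 & -12 == 4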
if __name__ == "__main__":
import doctest
doctest.testmod()
| 366 |
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Copy/paste/tweak the old ProphetNet checkpoint's weights into the current model structure."""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
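    # Example of the mapping above: a new-model key such as
    # "decoder.layers.0.self_attn.key_proj.weight" is walked attribute by
    # attribute and resolves to "decoder.layers.0.ngram_self_attn.k_proj.weight"
    # in the old checkpoint (illustrative; exact keys depend on the model).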
for key in loading_info["missing_keys"]:
_lowerCamelCase : Union[str, Any] = key.split('''.''' )
if attributes[0] == "lm_head":
_lowerCamelCase : str = prophet
_lowerCamelCase : List[Any] = prophet_old
else:
_lowerCamelCase : Optional[int] = prophet.prophetnet
_lowerCamelCase : Optional[Any] = prophet_old.model
_lowerCamelCase : Any = False
for attribute in attributes:
if attribute in mapping:
_lowerCamelCase : Optional[int] = mapping[attribute]
if not hasattr(A_, A_ ) and len(A_ ) > 0:
_lowerCamelCase : int = attribute
elif hasattr(A_, A_ ):
_lowerCamelCase : int = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_lowerCamelCase : Optional[int] = old_model.weight
logger.info(F'''{attribute} is initialized.''' )
_lowerCamelCase : List[str] = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_lowerCamelCase : int = old_model.bias
logger.info(F'''{attribute} is initialized''' )
_lowerCamelCase : Union[str, Any] = True
break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])

                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
_lowerCamelCase : List[str] = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
_lowerCamelCase : Optional[Any] = True
break
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)
if not is_key_init:
raise ValueError(F'''{key} was not correctly initialized!''' )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(A_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 175 | 0 |
"""simple docstring"""
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"
class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])
    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data
    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)
    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)
    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    """
    >>> test_singly_linked_list()
    """
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    """
    This section tests that the LinkedList additions work as expected for non-integer values.
    >>> test_singly_linked_list_2()
    """
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
| 203 |
"""simple docstring"""
def lucas_lehmer_test(p: int) -> bool:
    """Lucas-Lehmer primality test for the Mersenne number 2**p - 1."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
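# The first Mersenne prime exponents accepted by the test above are
# p = 2, 3, 5, 7, 13, 17, 19, 31; p = 11 is rejected because
# 2**11 - 1 = 2047 = 23 * 89.
assert (1 << 11) - 1 == 23 * 89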
| 203 | 1 |
import re
from filelock import FileLock
try:
import nltk
_snake_case = True
except (ImportError, ModuleNotFoundError):
_snake_case = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def A(_lowerCamelCase):
    """Split text into sentences, one per line (used to score rougeLsum like rougeL)."""
    _lowerCamelCase = re.sub("<n>", "", _lowerCamelCase)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(_lowerCamelCase))
| 300 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
_snake_case = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(_snake_case)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True
    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize

        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length

        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name

        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset

        self.output_retrieved = output_retrieved

        self.do_deduplication = do_deduplication

        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)
    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config, generator_config, **kwargs):
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
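# A minimal sketch of composing a RagConfig from two sub-configs, assuming the
# usual DPR question encoder + BART generator pairing used by the RAG models:
#   from transformers import AutoConfig
#   question_encoder = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   generator = AutoConfig.from_pretrained("facebook/bart-large")
#   rag_config = RagConfig.from_question_encoder_generator_configs(question_encoder, generator)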
| 300 | 1 |
"""simple docstring"""
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """
    Given a sorted list of integers, return the indices of the two numbers
    that add up to `target`, using the two-pointer scan.
    >>> two_pointer([2, 7, 11, 15], 9)
    [0, 1]
    """
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
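# NOTE: the scan above assumes `nums` is sorted in ascending order (O(n) after
# sorting); on unsorted input it can miss valid pairs, e.g. [7, 2, 15, 11].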
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{two_pointer([2, 7, 1_1, 1_5], 9) = }")
| 249 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))
def lowercase__ ( self : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] ):
lowerCAmelCase : List[str] = TFDPRQuestionEncoder(config=UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
lowerCAmelCase : int = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
lowerCAmelCase : Any = model(UpperCAmelCase_ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def lowercase__ ( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] ):
lowerCAmelCase : Optional[int] = TFDPRReader(config=UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def lowercase__ ( self : Any ):
lowerCAmelCase : List[Any] = self.prepare_config_and_inputs()
( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) = config_and_inputs
lowerCAmelCase : str = {'input_ids': input_ids}
return config, inputs_dict
@require_tf
class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : int = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ : str = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
lowerCAmelCase_ : Optional[int] = False
lowerCAmelCase_ : Union[str, Any] = False
lowerCAmelCase_ : str = False
lowerCAmelCase_ : List[Any] = False
lowerCAmelCase_ : Dict = False
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Optional[Any] = TFDPRModelTester(self )
lowerCAmelCase : List[Any] = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def lowercase__ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def lowercase__ ( self : Any ):
lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*UpperCAmelCase_ )
def lowercase__ ( self : Any ):
lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*UpperCAmelCase_ )
def lowercase__ ( self : Dict ):
lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*UpperCAmelCase_ )
@slow
def lowercase__ ( self : List[Any] ):
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : int = TFDPRContextEncoder.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : Any = TFDPRQuestionEncoder.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : Optional[int] = TFDPRReader.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
@require_tf
class __A ( unittest.TestCase ):
@slow
def lowercase__ ( self : Any ):
lowerCAmelCase : List[str] = TFDPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base' )
lowerCAmelCase : List[Any] = tf.constant(
[[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]] ) # [CLS] hello, is my dog cute? [SEP]
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
lowerCAmelCase : List[Any] = tf.constant(
[
[
0.03_23_62_53,
0.12_75_33_35,
0.16_81_85_09,
0.00_27_97_86,
0.3_89_69_33,
0.24_26_49_45,
0.2_17_89_71,
-0.02_33_52_27,
-0.08_48_19_59,
-0.14_32_41_17,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 138 | 0 |
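# Aside: the shape checks above verify that each DPR encoder emits one fixed-size
# pooled vector per example (projection_dim or hidden_size wide). A minimal sketch
# of what those vectors are for -- dense retrieval by inner product -- using
# random stand-in embeddings rather than real encoder outputs:
import numpy as np

question_emb = np.random.rand(768)      # stand-in for a question pooler_output
context_embs = np.random.rand(10, 768)  # stand-ins for 10 passage pooler_outputs
scores = context_embs @ question_emb    # DPR ranks passages by dot product
print("best passage index:", int(np.argmax(scores)))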
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
def lowerCamelCase__ ( UpperCamelCase__ : Dict ) -> Optional[Any]:
'''simple docstring'''
print('Loading config file...' )
def flatten_yaml_as_dict(UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple="" , UpperCamelCase__ : Optional[int]="." ):
_snake_case = []
for k, v in d.items():
_snake_case = parent_key + sep + k if parent_key else k
if isinstance(UpperCamelCase__ , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(UpperCamelCase__ , UpperCamelCase__ , sep=UpperCamelCase__ ).items() )
else:
items.append((new_key, v) )
return dict(UpperCamelCase__ )
_snake_case = argparse.Namespace()
with open(UpperCamelCase__ , 'r' ) as yaml_file:
try:
_snake_case = yaml.load(UpperCamelCase__ , Loader=yaml.FullLoader )
_snake_case = flatten_yaml_as_dict(UpperCamelCase__ )
for k, v in flat_cfg.items():
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
except yaml.YAMLError as exc:
logger.error('Error while loading config file: {}. Error message: {}'.format(UpperCamelCase__ , str(UpperCamelCase__ ) ) )
return config
def lowerCamelCase__ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str ) -> Any:
'''simple docstring'''
_snake_case = MobileViTVaConfig()
_snake_case = False
# dataset
if task_name.startswith('imagenet1k_' ):
_snake_case = 1_000
if int(task_name.strip().split('_' )[-1] ) == 384:
_snake_case = 384
else:
_snake_case = 256
_snake_case = 'imagenet-1k-id2label.json'
elif task_name.startswith('imagenet21k_to_1k_' ):
_snake_case = 21_000
if int(task_name.strip().split('_' )[-1] ) == 384:
_snake_case = 384
else:
_snake_case = 256
_snake_case = 'imagenet-22k-id2label.json'
elif task_name.startswith('ade20k_' ):
_snake_case = 151
_snake_case = 512
_snake_case = 'ade20k-id2label.json'
_snake_case = True
elif task_name.startswith('voc_' ):
_snake_case = 21
_snake_case = 512
_snake_case = 'pascal-voc-id2label.json'
_snake_case = True
# orig_config
_snake_case = load_orig_config_file(UpperCamelCase__ )
assert getattr(UpperCamelCase__ , 'model.classification.name' , -1 ) == "mobilevit_v2", "Invalid model"
_snake_case = getattr(UpperCamelCase__ , 'model.classification.mitv2.width_multiplier' , 1.0 )
assert (
getattr(UpperCamelCase__ , 'model.classification.mitv2.attn_norm_layer' , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
_snake_case = getattr(UpperCamelCase__ , 'model.classification.activation.name' , 'swish' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
_snake_case = getattr(UpperCamelCase__ , 'model.segmentation.output_stride' , 16 )
if "_deeplabv3" in task_name:
_snake_case = getattr(UpperCamelCase__ , 'model.segmentation.deeplabv3.aspp_rates' , [12, 24, 36] )
_snake_case = getattr(UpperCamelCase__ , 'model.segmentation.deeplabv3.aspp_out_channels' , 512 )
_snake_case = getattr(UpperCamelCase__ , 'model.segmentation.deeplabv3.aspp_dropout' , 0.1 )
# id2label
_snake_case = 'huggingface/label-files'
_snake_case = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) , 'r' ) )
_snake_case = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
_snake_case = idalabel
_snake_case = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase__ ( UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] ) -> Tuple:
'''simple docstring'''
_snake_case = dct.pop(UpperCamelCase__ )
_snake_case = val
def lowerCamelCase__ ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=False ) -> Any:
'''simple docstring'''
if base_model:
_snake_case = ''
else:
_snake_case = 'mobilevitv2.'
_snake_case = []
for k in state_dict.keys():
if k[:8] == "encoder.":
_snake_case = k[8:]
else:
_snake_case = k
if ".block." in k:
_snake_case = k_new.replace('.block.' , '.' )
if ".conv." in k:
_snake_case = k_new.replace('.conv.' , '.convolution.' )
if ".norm." in k:
_snake_case = k_new.replace('.norm.' , '.normalization.' )
if "conv_1." in k:
_snake_case = k_new.replace('conv_1.' , F'''{model_prefix}conv_stem.''' )
for i in [1, 2]:
if F'''layer_{i}.''' in k:
_snake_case = k_new.replace(F'''layer_{i}.''' , F'''{model_prefix}encoder.layer.{i-1}.layer.''' )
if ".exp_1x1." in k:
_snake_case = k_new.replace('.exp_1x1.' , '.expand_1x1.' )
if ".red_1x1." in k:
_snake_case = k_new.replace('.red_1x1.' , '.reduce_1x1.' )
for i in [3, 4, 5]:
if F'''layer_{i}.0.''' in k:
_snake_case = k_new.replace(F'''layer_{i}.0.''' , F'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
if F'''layer_{i}.1.local_rep.0.''' in k:
_snake_case = k_new.replace(F'''layer_{i}.1.local_rep.0.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
if F'''layer_{i}.1.local_rep.1.''' in k:
_snake_case = k_new.replace(F'''layer_{i}.1.local_rep.1.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
for i in [3, 4, 5]:
if i == 3:
_snake_case = [0, 1]
elif i == 4:
_snake_case = [0, 1, 2, 3]
elif i == 5:
_snake_case = [0, 1, 2]
for j in j_in:
if F'''layer_{i}.1.global_rep.{j}.''' in k:
_snake_case = k_new.replace(
F'''layer_{i}.1.global_rep.{j}.''' , F'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
if F'''layer_{i}.1.global_rep.{j+1}.''' in k:
_snake_case = k_new.replace(
F'''layer_{i}.1.global_rep.{j+1}.''' , F'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
if F'''layer_{i}.1.conv_proj.''' in k:
_snake_case = k_new.replace(F'''layer_{i}.1.conv_proj.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
if "pre_norm_attn.0." in k:
_snake_case = k_new.replace('pre_norm_attn.0.' , 'layernorm_before.' )
if "pre_norm_attn.1." in k:
_snake_case = k_new.replace('pre_norm_attn.1.' , 'attention.' )
if "pre_norm_ffn.0." in k:
_snake_case = k_new.replace('pre_norm_ffn.0.' , 'layernorm_after.' )
if "pre_norm_ffn.1." in k:
_snake_case = k_new.replace('pre_norm_ffn.1.' , 'ffn.conv1.' )
if "pre_norm_ffn.3." in k:
_snake_case = k_new.replace('pre_norm_ffn.3.' , 'ffn.conv2.' )
if "classifier.1." in k:
_snake_case = k_new.replace('classifier.1.' , 'classifier.' )
if "seg_head." in k:
_snake_case = k_new.replace('seg_head.' , 'segmentation_head.' )
if ".aspp_layer." in k:
_snake_case = k_new.replace('.aspp_layer.' , '.' )
if ".aspp_pool." in k:
_snake_case = k_new.replace('.aspp_pool.' , '.' )
rename_keys.append((k, k_new) )
return rename_keys
def lowerCamelCase__ ( UpperCamelCase__ : str ) -> List[Any]:
'''simple docstring'''
_snake_case = []
for k in state_dict.keys():
if k.startswith('seg_head.aux_head.' ):
keys_to_ignore.append(UpperCamelCase__ )
for k in keys_to_ignore:
state_dict.pop(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase__ ( ) -> Optional[int]:
'''simple docstring'''
_snake_case = 'http://images.cocodataset.org/val2017/000000039769.jpg'
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
_snake_case = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
return im
@torch.no_grad()
def lowerCamelCase__ ( UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] ) -> Any:
'''simple docstring'''
_snake_case = get_mobilevitva_config(UpperCamelCase__ , UpperCamelCase__ )
# load original state_dict
_snake_case = torch.load(UpperCamelCase__ , map_location='cpu' )
# load huggingface model
if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ):
_snake_case = MobileViTVaForSemanticSegmentation(UpperCamelCase__ ).eval()
_snake_case = False
else:
_snake_case = MobileViTVaForImageClassification(UpperCamelCase__ ).eval()
_snake_case = False
# remove some keys and rename the rest so the original checkpoint can be loaded
_snake_case = checkpoint
remove_unused_keys(UpperCamelCase__ )
_snake_case = create_rename_keys(UpperCamelCase__ , base_model=UpperCamelCase__ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# load modified state_dict
model.load_state_dict(UpperCamelCase__ )
# Check outputs on an image, prepared by MobileViTImageProcessor
_snake_case = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
_snake_case = image_processor(images=prepare_img() , return_tensors='pt' )
_snake_case = model(**UpperCamelCase__ )
# verify classification model
if task_name.startswith('imagenet' ):
_snake_case = outputs.logits
_snake_case = logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
_snake_case = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] )
assert torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1e-4 )
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
print(F'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCamelCase__ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""",
default="""imagenet1k_256""",
type=str,
help=(
"""Name of the task for which the MobileViTV2 model you'd like to convert is trained on . """
"""
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
"""
),
choices=[
"""imagenet1k_256""",
"""imagenet1k_384""",
"""imagenet21k_to_1k_256""",
"""imagenet21k_to_1k_384""",
"""ade20k_deeplabv3""",
"""voc_deeplabv3""",
],
)
parser.add_argument(
"""--orig_checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument("""--orig_config_path""", required=True, type=str, help="""Path to the original config file.""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
UpperCAmelCase_ = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 295 |
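# Aside: the converter above builds (old_key, new_key) pairs and then pops and
# re-inserts entries one at a time. The same rename pattern in isolation, on a
# toy state dict (all key names below are invented for illustration):
toy_state_dict = {"conv_1.weight": 1, "layer_3.0.bias": 2, "classifier.1.weight": 3}
toy_rename_keys = [
    ("conv_1.weight", "mobilevitv2.conv_stem.weight"),
    ("layer_3.0.bias", "mobilevitv2.encoder.layer.2.downsampling_layer.bias"),
    ("classifier.1.weight", "classifier.weight"),
]
for src, dest in toy_rename_keys:
    toy_state_dict[dest] = toy_state_dict.pop(src)  # same move as rename_key above
print(sorted(toy_state_dict))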
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def lowerCamelCase__ ( UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ) -> List[Any]:
'''simple docstring'''
_snake_case = OmegaConf.load(UpperCamelCase__ )
_snake_case = torch.load(UpperCamelCase__ , map_location='cpu' )['model']
_snake_case = list(state_dict.keys() )
# extract state_dict for VQVAE
_snake_case = {}
_snake_case = 'first_stage_model.'
for key in keys:
if key.startswith(UpperCamelCase__ ):
_snake_case = state_dict[key]
# extract state_dict for UNetLDM
_snake_case = {}
_snake_case = 'model.diffusion_model.'
for key in keys:
if key.startswith(UpperCamelCase__ ):
_snake_case = state_dict[key]
_snake_case = config.model.params.first_stage_config.params
_snake_case = config.model.params.unet_config.params
_snake_case = VQModel(**UpperCamelCase__ ).eval()
vqvae.load_state_dict(UpperCamelCase__ )
_snake_case = UNetLDMModel(**UpperCamelCase__ ).eval()
unet.load_state_dict(UpperCamelCase__ )
_snake_case = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule='scaled_linear' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=UpperCamelCase__ , )
_snake_case = LDMPipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipeline.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", type=str, required=True)
parser.add_argument("""--config_path""", type=str, required=True)
parser.add_argument("""--output_path""", type=str, required=True)
UpperCAmelCase_ = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 295 | 1 |
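# Aside: the LDM converter above slices one flat checkpoint into per-module state
# dicts by key prefix. The same idea stripped down, with invented keys:
full_state = {
    "first_stage_model.encoder.w": 1,
    "model.diffusion_model.block.w": 2,
    "cond_stage_model.w": 3,
}

def split_by_prefix(state, prefix):
    # keep only entries under `prefix`, with the prefix stripped from the key
    return {k[len(prefix):]: v for k, v in state.items() if k.startswith(prefix)}

vqvae_state = split_by_prefix(full_state, "first_stage_model.")
unet_state = split_by_prefix(full_state, "model.diffusion_model.")
print(vqvae_state, unet_state)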
"""simple docstring"""
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class __snake_case ( _lowercase , unittest.TestCase):
snake_case__ : str = FlaxAutoencoderKL
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Dict = 4
_lowerCamelCase : List[str] = 3
_lowerCamelCase : List[Any] = (3_2, 3_2)
_lowerCamelCase : str = jax.random.PRNGKey(0 )
_lowerCamelCase : int = jax.random.uniform(__lowerCAmelCase , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = {
'''block_out_channels''': [3_2, 6_4],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
_lowerCamelCase : Tuple = self.dummy_input
return init_dict, inputs_dict
| 72 |
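# Aside: the Flax test above threads an explicit PRNG key through jax.random. A
# quick self-contained reminder of that API (deterministic given the seed;
# assumes jax is installed):
import jax

key = jax.random.PRNGKey(0)
key, subkey = jax.random.split(key)  # derive a fresh subkey for each use
sample = jax.random.uniform(subkey, (4, 3, 32, 32))
print(sample.shape)  # (4, 3, 32, 32) -- the same layout as dummy_input above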
"""simple docstring"""
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
_UpperCAmelCase = 6
_UpperCAmelCase = 1
_UpperCAmelCase = 1901
_UpperCAmelCase = 0
while year < 2001:
day += 7
if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
if day > days_per_month[month - 1] and month != 2:
month += 1
_UpperCAmelCase = day - days_per_month[month - 2]
elif day > 29 and month == 2:
month += 1
_UpperCAmelCase = day - 29
else:
if day > days_per_month[month - 1]:
month += 1
_UpperCAmelCase = day - days_per_month[month - 2]
if month > 12:
year += 1
_UpperCAmelCase = 1
if year < 2001 and day == 1:
sundays += 1
return sundays
if __name__ == "__main__":
print(solution())
| 260 | 0 |
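# Aside: the hand-rolled calendar above is easy to get subtly wrong, so here is
# a cross-check with the standard library answering the same question (Sundays
# falling on the first of the month, January 1901 through December 2000):
import datetime

count = sum(
    1
    for year in range(1901, 2001)
    for month in range(1, 13)
    if datetime.date(year, month, 1).weekday() == 6  # weekday() == 6 is Sunday
)
print(count)  # should agree with solution()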
"""simple docstring"""
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a ):
# the input arrives as a comma-separated string; split it into a list of items
__a = arr.split(''',''' )
def __UpperCAmelCase ( self ):
__a = [int(self.array[0] )] * len(self.array )
__a = [int(self.array[0] )] * len(self.array )
for i in range(1 , len(self.array ) ):
__a = max(
int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
__a = max(sum_value[i] , rear[i - 1] )
return rear[len(self.array ) - 1]
if __name__ == "__main__":
lowercase_ = input("please input some numbers:")
lowercase_ = SubArray(whole_array)
lowercase_ = array.solve_sub_array()
print(("the results is:", re))
| 11 |
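# Aside: the class above solves maximum subarray with two DP arrays (best sum
# ending at i, plus a running maximum). The standard one-pass, constant-space
# form is Kadane's algorithm -- shown as an independent sketch, not a drop-in
# replacement for SubArray:
def kadane(nums):
    best = current = nums[0]
    for x in nums[1:]:
        current = max(x, current + x)  # extend the current run or restart at x
        best = max(best, current)
    return best

print(kadane([1, -2, 4, 5, -1, 2]))  # 10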
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 11 | 1 |
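# Aside: the __init__ above defers heavy imports through _LazyModule. The core
# mechanism can be sketched with a PEP 562 module-level __getattr__ -- a
# simplified stand-in, not the actual transformers implementation:
import importlib

_LAZY_ATTRS = {"VisionTextDualEncoderConfig": ".configuration_vision_text_dual_encoder"}

def __getattr__(name):
    # triggered only when `name` is not found in this module's globals
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")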
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class _SCREAMING_SNAKE_CASE :
lowerCAmelCase__ = field(
metadata={'help': 'The output directory where the model will be written.'} , )
lowerCAmelCase__ = field(
metadata={
'help': (
'The encoder model checkpoint for weights initialization.'
'Don\'t set if you want to train an encoder model from scratch.'
)
} , )
lowerCAmelCase__ = field(
metadata={
'help': (
'The decoder model checkpoint for weights initialization.'
'Don\'t set if you want to train a decoder model from scratch.'
)
} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Pretrained encoder config name or path if not the same as encoder_model_name'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Pretrained decoder config name or path if not the same as decoder_model_name'} )
def lowerCamelCase_ ( ):
lowerCamelCase_ = HfArgumentParser((ModelArguments,) )
((lowerCamelCase_) , ) = parser.parse_args_into_dataclasses()
# Load pretrained model and tokenizer
# Use explicit specified encoder config
if model_args.encoder_config_name:
lowerCamelCase_ = AutoConfig.from_pretrained(model_args.encoder_config_name )
# Use pretrained encoder model's config
else:
lowerCamelCase_ = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
# Use explicit specified decoder config
if model_args.decoder_config_name:
lowerCamelCase_ = AutoConfig.from_pretrained(model_args.decoder_config_name )
# Use pretrained decoder model's config
else:
lowerCamelCase_ = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
# necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=lowerCamelCase__ , decoder_config=lowerCamelCase__ , )
# GPT2 only has bos/eos tokens but not decoder_start/pad tokens
lowerCamelCase_ = decoder_config.decoder_start_token_id
lowerCamelCase_ = decoder_config.pad_token_id
if decoder_start_token_id is None:
lowerCamelCase_ = decoder_config.bos_token_id
if pad_token_id is None:
lowerCamelCase_ = decoder_config.eos_token_id
# This is necessary to make Flax's generate() work
lowerCamelCase_ = decoder_config.eos_token_id
lowerCamelCase_ = decoder_start_token_id
lowerCamelCase_ = pad_token_id
lowerCamelCase_ = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
lowerCamelCase_ = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
lowerCamelCase_ = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
model.save_pretrained(model_args.output_dir )
image_processor.save_pretrained(model_args.output_dir )
tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
| 19 |
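# Aside: the script above relies on the HfArgumentParser pattern where dataclass
# fields become CLI flags. A minimal sketch of just that pattern (the dataclass
# and its fields here are invented for illustration):
from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class DemoArguments:
    output_dir: str = field(metadata={"help": "Where to write the model."})
    seed: int = field(default=42, metadata={"help": "Random seed."})

demo_parser = HfArgumentParser((DemoArguments,))
(demo_args,) = demo_parser.parse_args_into_dataclasses()  # reads sys.argv
print(demo_args.output_dir, demo_args.seed)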
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
_lowerCAmelCase = logging.get_logger(__name__)
class lowerCAmelCase_:
'''simple docstring'''
def __init__( self ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ) -> str:
if not conversation_id:
lowerCAmelCase__ : List[str] = uuid.uuida()
if past_user_inputs is None:
lowerCAmelCase__ : List[Any] = []
if generated_responses is None:
lowerCAmelCase__ : str = []
lowerCAmelCase__ : uuid.UUID = conversation_id
lowerCAmelCase__ : List[str] = past_user_inputs
lowerCAmelCase__ : List[str] = generated_responses
lowerCAmelCase__ : Optional[str] = text
def __eq__( self ,__UpperCAmelCase ) -> Dict:
if not isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = False ) -> Optional[Any]:
if self.new_user_input:
if overwrite:
logger.warning(
F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
F"""with: \"{text}\".""" )
lowerCAmelCase__ : Optional[int] = text
else:
logger.warning(
F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
else:
lowerCAmelCase__ : Optional[Any] = text
def UpperCAmelCase_ ( self ) -> List[Any]:
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
lowerCAmelCase__ : Union[str, Any] = None
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Tuple:
self.generated_responses.append(__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> List[str]:
for user_input, generated_response in zip(self.past_user_inputs ,self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ) -> Tuple:
lowerCAmelCase__ : Tuple = F"""Conversation id: {self.uuid} \n"""
for is_user, text in self.iter_texts():
lowerCAmelCase__ : Any = """user""" if is_user else """bot"""
output += F"""{name} >> {text} \n"""
return output
@add_end_docstrings(
SCREAMING_SNAKE_CASE_ , R'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple:
super().__init__(*__UpperCAmelCase ,**__UpperCAmelCase )
if self.tokenizer.pad_token_id is None:
lowerCAmelCase__ : Tuple = self.tokenizer.eos_token
def UpperCAmelCase_ ( self ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,**__UpperCAmelCase ) -> Optional[int]:
lowerCAmelCase__ : List[Any] = {}
lowerCAmelCase__ : Optional[int] = {}
lowerCAmelCase__ : List[str] = {}
if min_length_for_response is not None:
lowerCAmelCase__ : Any = min_length_for_response
if minimum_tokens is not None:
lowerCAmelCase__ : Optional[int] = minimum_tokens
if "max_length" in generate_kwargs:
lowerCAmelCase__ : Optional[Any] = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
lowerCAmelCase__ : int = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(__UpperCAmelCase )
return preprocess_params, forward_params, postprocess_params
def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase=0 ,**__UpperCAmelCase ) -> List[str]:
lowerCAmelCase__ : Optional[int] = super().__call__(__UpperCAmelCase ,num_workers=__UpperCAmelCase ,**__UpperCAmelCase )
if isinstance(__UpperCAmelCase ,__UpperCAmelCase ) and len(__UpperCAmelCase ) == 1:
return outputs[0]
return outputs
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=32 ) -> Dict[str, Any]:
if not isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
F"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer ,"""_build_conversation_input_ids""" ):
lowerCAmelCase__ : str = self.tokenizer._build_conversation_input_ids(__UpperCAmelCase )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
lowerCAmelCase__ : List[Any] = self._legacy_parse_and_tokenize(__UpperCAmelCase )
if self.framework == "pt":
lowerCAmelCase__ : List[Any] = torch.LongTensor([input_ids] )
elif self.framework == "tf":
lowerCAmelCase__ : Dict = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=10 ,**__UpperCAmelCase ) -> Dict:
lowerCAmelCase__ : Optional[Any] = generate_kwargs.get("""max_length""" ,self.model.config.max_length )
lowerCAmelCase__ : Optional[Any] = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
lowerCAmelCase__ : str = max_length - minimum_tokens
lowerCAmelCase__ : Union[str, Any] = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
lowerCAmelCase__ : Tuple = model_inputs["""attention_mask"""][:, -trim:]
lowerCAmelCase__ : str = model_inputs.pop("""conversation""" )
lowerCAmelCase__ : Union[str, Any] = max_length
lowerCAmelCase__ : Any = self.model.generate(**__UpperCAmelCase ,**__UpperCAmelCase )
if self.model.config.is_encoder_decoder:
lowerCAmelCase__ : int = 1
else:
lowerCAmelCase__ : int = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=True ) -> List[str]:
lowerCAmelCase__ : Optional[int] = model_outputs["""output_ids"""]
lowerCAmelCase__ : Tuple = self.tokenizer.decode(
output_ids[0] ,skip_special_tokens=__UpperCAmelCase ,clean_up_tokenization_spaces=__UpperCAmelCase ,)
lowerCAmelCase__ : Union[str, Any] = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(__UpperCAmelCase )
return conversation
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Dict:
lowerCAmelCase__ : Dict = self.tokenizer.eos_token_id
lowerCAmelCase__ : int = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ) )
if len(__UpperCAmelCase ) > self.tokenizer.model_max_length:
lowerCAmelCase__ : Optional[Any] = input_ids[-self.tokenizer.model_max_length :]
return input_ids
| 37 | 0 |
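# Aside: _legacy_parse_and_tokenize above interleaves user and bot turns with the
# tokenizer's EOS id and truncates from the left to fit model_max_length. The
# same logic without the pipeline machinery, using a toy whitespace "tokenizer"
# (vocabulary and ids invented purely for illustration):
EOS_ID = 0
VOCAB = {"hi": 1, "there": 2, "how": 3, "are": 4, "you": 5}

def toy_encode(text):
    return [VOCAB[w] for w in text.split()]

turns = ["hi there", "how are you"]  # alternating user / bot texts
input_ids = []
for text in turns:
    input_ids.extend(toy_encode(text) + [EOS_ID])  # every turn ends with EOS
MAX_LEN = 6
input_ids = input_ids[-MAX_LEN:]  # keep only the most recent tokens
print(input_ids)  # [2, 0, 3, 4, 5, 0]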
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __magic_name__ :
def __init__( self : Dict ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : Optional[Any]=13 ,_UpperCAmelCase : str=10 ,_UpperCAmelCase : Optional[int]=3 ,_UpperCAmelCase : List[str]=2 ,_UpperCAmelCase : Tuple=2 ,_UpperCAmelCase : Dict=2 ,_UpperCAmelCase : Union[str, Any]=True ,_UpperCAmelCase : Optional[Any]=True ,_UpperCAmelCase : List[Any]=32 ,_UpperCAmelCase : Optional[int]=5 ,_UpperCAmelCase : Union[str, Any]=4 ,_UpperCAmelCase : Any=37 ,_UpperCAmelCase : Optional[Any]="gelu" ,_UpperCAmelCase : Dict=0.1 ,_UpperCAmelCase : List[str]=0.1 ,_UpperCAmelCase : Union[str, Any]=10 ,_UpperCAmelCase : Optional[int]=0.02 ,_UpperCAmelCase : Optional[int]=0.9 ,_UpperCAmelCase : Tuple=None ,):
_a : Optional[int] = parent
_a : List[str] = batch_size
_a : Any = image_size
_a : List[str] = num_channels
_a : Tuple = patch_size
_a : Tuple = tubelet_size
_a : str = num_frames
_a : List[str] = is_training
_a : Optional[Any] = use_labels
_a : int = hidden_size
_a : Optional[int] = num_hidden_layers
_a : Optional[Any] = num_attention_heads
_a : Dict = intermediate_size
_a : str = hidden_act
_a : Union[str, Any] = hidden_dropout_prob
_a : Optional[Any] = attention_probs_dropout_prob
_a : Tuple = type_sequence_label_size
_a : int = initializer_range
_a : Any = mask_ratio
_a : Union[str, Any] = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
_a : Union[str, Any] = (image_size // patch_size) ** 2
_a : Optional[Any] = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
_a : Optional[int] = int(mask_ratio * self.seq_length )
def __lowercase ( self : Tuple ):
_a : List[Any] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
_a : Any = None
if self.use_labels:
_a : Tuple = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_a : int = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : str ):
return VideoMAEConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,num_frames=self.num_frames ,tubelet_size=self.tubelet_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_UpperCAmelCase ,initializer_range=self.initializer_range ,)
def __lowercase ( self : List[str] ,_UpperCAmelCase : Any ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : Union[str, Any] ):
_a : List[str] = VideoMAEModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : int = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self : Dict ,_UpperCAmelCase : Any ,_UpperCAmelCase : str ,_UpperCAmelCase : List[str] ):
_a : Any = VideoMAEForPreTraining(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
_a : Union[str, Any] = torch.ones((self.num_masks,) )
_a : Any = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
_a : str = mask.expand(self.batch_size ,-1 ).bool()
_a : Any = model(_UpperCAmelCase ,_UpperCAmelCase )
# model only returns predictions for masked patches
_a : Dict = mask.sum().item()
_a : List[Any] = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_masked_patches, decoder_num_labels) )
def __lowercase ( self : Union[str, Any] ):
_a : Tuple = self.prepare_config_and_inputs()
_a : Any = config_and_inputs
_a : Tuple = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowerCAmelCase : Optional[Any] = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
lowerCAmelCase : Tuple = (
{'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : Optional[int] = False
def __lowercase ( self : List[Any] ):
_a : int = VideoMAEModelTester(self )
_a : Dict = ConfigTester(self ,config_class=_UpperCAmelCase ,has_text_modality=_UpperCAmelCase ,hidden_size=37 )
def __lowercase ( self : str ,_UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : int=False ):
_a : Tuple = copy.deepcopy(_UpperCAmelCase )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
_a : int = torch.ones((self.model_tester.num_masks,) )
_a : int = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
_a : str = mask.expand(self.model_tester.batch_size ,-1 ).bool()
_a : Optional[int] = bool_masked_pos.to(_UpperCAmelCase )
if return_labels:
if model_class in [
*get_values(_UpperCAmelCase ),
]:
_a : Any = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=_UpperCAmelCase )
return inputs_dict
def __lowercase ( self : Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason='VideoMAE does not use inputs_embeds' )
def __lowercase ( self : List[str] ):
pass
def __lowercase ( self : List[Any] ):
_a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Optional[Any] = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_a : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase ,nn.Linear ) )
def __lowercase ( self : Tuple ):
_a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Optional[Any] = model_class(_UpperCAmelCase )
_a : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : List[str] = [*signature.parameters.keys()]
_a : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,_UpperCAmelCase )
def __lowercase ( self : List[str] ):
_a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def __lowercase ( self : int ):
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_UpperCAmelCase )
@slow
def __lowercase ( self : List[str] ):
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : List[str] = VideoMAEModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def __lowercase ( self : int ):
if not self.has_attentions:
pass
else:
_a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_a : Dict = True
for model_class in self.all_model_classes:
_a : List[Any] = self.model_tester.seq_length - self.model_tester.num_masks
_a : int = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
_a : int = True
_a : Optional[Any] = False
_a : Optional[int] = True
_a : int = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
_a : Tuple = model(**self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ) )
_a : Optional[int] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase ) ,self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_a : Union[str, Any] = True
_a : Union[str, Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
_a : List[Any] = model(**self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ) )
_a : Union[str, Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len, seq_len] ,)
_a : Any = len(_UpperCAmelCase )
# Check attention is always last and order is fine
_a : int = True
_a : Optional[Any] = True
_a : Tuple = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
_a : Tuple = model(**self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ) )
self.assertEqual(out_len + 1 ,len(_UpperCAmelCase ) )
_a : int = outputs.attentions
self.assertEqual(len(_UpperCAmelCase ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len, seq_len] ,)
def __lowercase ( self : Union[str, Any] ):
def check_hidden_states_output(_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Dict ,_UpperCAmelCase : int ):
_a : Dict = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
_a : Tuple = model(**self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ) )
_a : Optional[Any] = outputs.hidden_states
_a : str = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(_UpperCAmelCase ) ,_UpperCAmelCase )
_a : List[str] = self.model_tester.seq_length - self.model_tester.num_masks
_a : Union[str, Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[seq_length, self.model_tester.hidden_size] ,)
_a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Optional[Any] = True
check_hidden_states_output(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : int = True
check_hidden_states_output(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __lowercase ( self : List[Any] ):
pass
def __lowerCamelCase ( ) -> Any:
_a : List[str] = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
_a : Any = np.load(_lowerCAmelCase )
return list(_lowerCAmelCase )
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase ):
@cached_property
def __lowercase ( self : Union[str, Any] ):
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] ,image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def __lowercase ( self : List[Any] ):
_a : Optional[Any] = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to(
_UpperCAmelCase )
_a : Dict = self.default_image_processor
_a : Optional[int] = prepare_video()
_a : Union[str, Any] = image_processor(_UpperCAmelCase ,return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
_a : List[str] = model(**_UpperCAmelCase )
# verify the logits
_a : Tuple = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape ,_UpperCAmelCase )
_a : Tuple = torch.tensor([0.36_69, -0.06_88, -0.24_21] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_UpperCAmelCase ,atol=1E-4 ) )
@slow
def __lowercase ( self : Tuple ):
_a : Optional[Any] = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(_UpperCAmelCase )
_a : int = self.default_image_processor
_a : Tuple = prepare_video()
_a : List[Any] = image_processor(_UpperCAmelCase ,return_tensors='pt' ).to(_UpperCAmelCase )
# add boolean mask, indicating which patches to mask
_a : Optional[int] = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' ,filename='bool_masked_pos.pt' )
_a : Any = torch.load(_UpperCAmelCase )
# forward pass
with torch.no_grad():
_a : Union[str, Any] = model(**_UpperCAmelCase )
# verify the logits
_a : Any = torch.Size([1, 1408, 1536] )
_a : Optional[int] = torch.tensor(
[[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] ,device=_UpperCAmelCase )
self.assertEqual(outputs.logits.shape ,_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,_UpperCAmelCase ,atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
_a : Union[str, Any] = torch.tensor([0.51_42] ,device=_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.loss ,_UpperCAmelCase ,atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
_a : Optional[int] = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ,norm_pix_loss=_UpperCAmelCase ).to(
_UpperCAmelCase )
with torch.no_grad():
_a : Optional[int] = model(**_UpperCAmelCase )
_a : List[Any] = torch.tensor([0.64_69] ,device=_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.loss ,_UpperCAmelCase ,atol=1E-4 ) )
| 354 |
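# Aside: both the tester and the integration test above build bool_masked_pos the
# same way -- ones for masked positions, zeros elsewhere, expanded over the
# batch. As a stand-alone sketch with small numbers for readability:
import torch

seq_length, num_masks, batch_size = 8, 3, 2
mask = torch.cat([torch.ones(num_masks), torch.zeros(seq_length - num_masks)])
bool_masked_pos = mask.expand(batch_size, -1).bool()
print(bool_masked_pos.shape, int(bool_masked_pos[0].sum()))  # torch.Size([2, 8]) 3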
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
__lowerCAmelCase = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
__lowerCAmelCase = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
__lowerCAmelCase = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_000))
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> tuple[str, float]:
_a : List[Any] = len([g for position, g in enumerate(lowerCAmelCase_ ) if g == main_target[position]] )
return (item, float(lowerCAmelCase_ ))
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> tuple[str, str]:
_a : Dict = random.randint(0 , len(lowerCAmelCase_ ) - 1 )
_a : Optional[int] = parent_a[:random_slice] + parent_a[random_slice:]
_a : Optional[int] = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_a : Optional[Any] = list(lowerCAmelCase_ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
_a : Optional[int] = random.choice(lowerCAmelCase_ )
return "".join(lowerCAmelCase_ )
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> list[str]:
_a : List[str] = []
# Generate more children proportionally to the fitness score.
_a : Tuple = int(parent_a[1] * 100 ) + 1
_a : Tuple = 10 if child_n >= 10 else child_n
for _ in range(lowerCAmelCase_ ):
_a : Any = population_score[random.randint(0 , lowerCAmelCase_ )][0]
_a , _a : Tuple = crossover(parent_a[0] , lowerCAmelCase_ )
# Append new string to the population list.
pop.append(mutate(lowerCAmelCase_ , lowerCAmelCase_ ) )
pop.append(mutate(lowerCAmelCase_ , lowerCAmelCase_ ) )
return pop
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = True ) -> tuple[int, int, str]:
# Verify that N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
_a : Dict = f"""{N_POPULATION} must be bigger than {N_SELECTED}"""
raise ValueError(lowerCAmelCase_ )
# Verify that the target contains no genes besides the ones inside genes variable.
_a : Optional[int] = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
_a : List[Any] = f"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
raise ValueError(lowerCAmelCase_ )
# Generate random starting population.
_a : Union[str, Any] = []
for _ in range(lowerCAmelCase_ ):
population.append(''.join([random.choice(lowerCAmelCase_ ) for i in range(len(lowerCAmelCase_ ) )] ) )
# Just some logs to know what the algorithms is doing.
_a , _a : Union[str, Any] = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(lowerCAmelCase_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
_a : Optional[Any] = [evaluate(lowerCAmelCase_ , lowerCAmelCase_ ) for item in population]
# Check if there is a matching evolution.
_a : Tuple = sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : x[1] , reverse=lowerCAmelCase_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f"""\nGeneration: {generation}"""
f"""\nTotal Population:{total_population}"""
f"""\nBest score: {population_score[0][1]}"""
f"""\nBest string: {population_score[0][0]}""" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
_a : Dict = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(lowerCAmelCase_ )
# Normalize population score to be between 0 and 1.
_a : Tuple = [
(item, score / len(lowerCAmelCase_ )) for item, score in population_score
]
# This is selection
for i in range(lowerCAmelCase_ ):
population.extend(select(population_score[int(lowerCAmelCase_ )] , lowerCAmelCase_ , lowerCAmelCase_ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# far fewer generations.
if len(lowerCAmelCase_ ) > N_POPULATION:
break
if __name__ == "__main__":
__lowerCAmelCase = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
__lowerCAmelCase = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
| 107 | 0 |
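# Aside: a tiny worked example of the fitness rule the loop above optimizes --
# per-position character matches against the target, later normalized by the
# target length:
demo_target = "GATTACA"
demo_item = "GATTTTA"
demo_matches = sum(g == t for g, t in zip(demo_item, demo_target))  # 5 of 7 match
print(demo_item, demo_matches / len(demo_target))  # GATTTTA 0.714...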
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCamelCase__( unittest.TestCase):
@property
def lowerCAmelCase__ ( self: List[str] ):
torch.manual_seed(0 )
__lowerCamelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = self.dummy_uncond_unet
__lowerCamelCase = KarrasVeScheduler()
__lowerCamelCase = KarrasVePipeline(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(num_inference_steps=2 , generator=UpperCamelCase_ , output_type="""numpy""" ).images
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(num_inference_steps=2 , generator=UpperCamelCase_ , output_type="""numpy""" , return_dict=UpperCamelCase_ )[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = """google/ncsnpp-celebahq-256"""
__lowerCamelCase = UNetaDModel.from_pretrained(UpperCamelCase_ )
__lowerCamelCase = KarrasVeScheduler()
__lowerCamelCase = KarrasVePipeline(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(num_inference_steps=20 , generator=UpperCamelCase_ , output_type="""numpy""" ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
__lowerCamelCase = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 12 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
UpperCAmelCase_ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 12 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _snake_case :
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_token_type_ids=True,
                 use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
                 intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
                 num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
4_0477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids) | 361 |
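# Aside (my addition, not part of the test above): `generate(..., do_sample=False)`
# performs greedy decoding. A hand-rolled sketch of the same loop, assuming a
# causal LM head model and `torch` as imported in the test file:
def greedy_decode(model, input_ids, max_new_tokens=17):
    for _ in range(max_new_tokens):
        logits = model(input_ids).logits                     # (batch, seq, vocab)
        next_token = logits[:, -1, :].argmax(dim=-1, keepdim=True)
        input_ids = torch.cat([input_ids, next_token], dim=-1)
    return input_ids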
'''simple docstring'''
def solution(n: int = 4_000_000) -> int:
    """Sum the even-valued Fibonacci terms that do not exceed ``n``."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(f"""{solution() = }""") | 283 | 0 |
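# Aside (my addition, not part of the original solution): every third Fibonacci
# number is even, and the even terms satisfy E(k) = 4 * E(k - 1) + E(k - 2),
# so they can be generated directly without touching the odd terms:
def solution_even_only(n: int = 4_000_000) -> int:
    total, a, b = 0, 2, 8  # 2 and 8 are the first two even Fibonacci numbers
    while a <= n:
        total += a
        a, b = b, 4 * b + a
    return total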
'''simple docstring'''
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    """Deterministic Miller-Rabin primality test, valid for n below ~3.32e24."""
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime."
        )
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
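    # e.g. for n = 221: n - 1 = 220 = 55 * 2**2, so d = 55 and s = 2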
for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True
def test_miller_rabin() -> None:
assert not miller_rabin(5_6_1 )
assert miller_rabin(5_6_3 )
# 2047
assert not miller_rabin(8_3_8_2_0_1 )
assert miller_rabin(8_3_8_2_0_7 )
# 1_373_653
assert not miller_rabin(1_7_3_1_6_0_0_1 )
assert miller_rabin(1_7_3_1_6_0_1_7 )
# 25_326_001
assert not miller_rabin(3_0_7_8_3_8_6_6_4_1 )
assert miller_rabin(3_0_7_8_3_8_6_6_5_3 )
# 3_215_031_751
assert not miller_rabin(1_7_1_3_0_4_5_5_7_4_8_0_1 )
assert miller_rabin(1_7_1_3_0_4_5_5_7_4_8_1_9 )
# 2_152_302_898_747
assert not miller_rabin(2_7_7_9_7_9_9_7_2_8_3_0_7 )
assert miller_rabin(2_7_7_9_7_9_9_7_2_8_3_2_7 )
# 3_474_749_660_383
assert not miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_4_4_1 )
assert miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_5_2_7 )
# 341_550_071_728_321
assert not miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_5_1 )
assert miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_9_1 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_8_6_7 )
assert miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_9_5_1 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_3_3 )
assert miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_5_9 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 319 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"
def __init__( self,__lowerCamelCase ):
if type(__lowerCamelCase ) == dict:
A__ = Namespace(**__lowerCamelCase )
A__ = glue_output_modes[hparams.task]
A__ = glue_tasks_num_labels[hparams.task]
super().__init__(__lowerCamelCase,__lowerCamelCase,self.mode )
def UpperCamelCase ( self,**__lowerCamelCase ):
return self.model(**__lowerCamelCase )
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase ):
A__ = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
A__ = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
A__ = self(**__lowerCamelCase )
A__ = outputs[0]
A__ = self.trainer.lr_schedulers[0]['''scheduler''']
A__ = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def UpperCamelCase ( self ):
A__ = self.hparams
A__ = processors[args.task]()
A__ = processor.get_labels()
for mode in ["train", "dev"]:
A__ = self._feature_file(__lowerCamelCase )
if os.path.exists(__lowerCamelCase ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''',__lowerCamelCase )
else:
logger.info('''Creating features from dataset file at %s''',args.data_dir )
A__ = (
processor.get_dev_examples(args.data_dir )
if mode == '''dev'''
else processor.get_train_examples(args.data_dir )
)
A__ = convert_examples_to_features(
__lowerCamelCase,self.tokenizer,max_length=args.max_seq_length,label_list=self.labels,output_mode=args.glue_output_mode,)
logger.info('''Saving features into cached file %s''',__lowerCamelCase )
torch.save(__lowerCamelCase,__lowerCamelCase )
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase = False ):
A__ = '''dev''' if mode == '''test''' else mode
A__ = self._feature_file(__lowerCamelCase )
logger.info('''Loading features from cached file %s''',__lowerCamelCase )
A__ = torch.load(__lowerCamelCase )
A__ = torch.tensor([f.input_ids for f in features],dtype=torch.long )
A__ = torch.tensor([f.attention_mask for f in features],dtype=torch.long )
A__ = torch.tensor([f.token_type_ids for f in features],dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
A__ = torch.tensor([f.label for f in features],dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
A__ = torch.tensor([f.label for f in features],dtype=torch.float )
return DataLoader(
TensorDataset(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ),batch_size=__lowerCamelCase,shuffle=__lowerCamelCase,)
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase ):
A__ = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
A__ = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
A__ = self(**__lowerCamelCase )
A__ , A__ = outputs[:2]
A__ = logits.detach().cpu().numpy()
A__ = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def UpperCamelCase ( self,__lowerCamelCase ):
A__ = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item()
A__ = np.concatenate([x['''pred'''] for x in outputs],axis=0 )
if self.hparams.glue_output_mode == "classification":
A__ = np.argmax(__lowerCamelCase,axis=1 )
elif self.hparams.glue_output_mode == "regression":
A__ = np.squeeze(__lowerCamelCase )
A__ = np.concatenate([x['''target'''] for x in outputs],axis=0 )
A__ = [[] for _ in range(out_label_ids.shape[0] )]
A__ = [[] for _ in range(out_label_ids.shape[0] )]
A__ = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task,__lowerCamelCase,__lowerCamelCase )}
A__ = dict(results.items() )
A__ = results
return ret, preds_list, out_label_list
def UpperCamelCase ( self,__lowerCamelCase ):
A__ , A__ , A__ = self._eval_end(__lowerCamelCase )
A__ = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def UpperCamelCase ( self,__lowerCamelCase ):
A__ , A__ , A__ = self._eval_end(__lowerCamelCase )
A__ = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def UpperCamelCase ( __lowerCamelCase,__lowerCamelCase ):
BaseTransformer.add_model_specific_args(__lowerCamelCase,__lowerCamelCase )
parser.add_argument(
'''--max_seq_length''',default=128,type=__lowerCamelCase,help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
),)
parser.add_argument(
'''--task''',default='''''',type=__lowerCamelCase,required=__lowerCamelCase,help='''The GLUE task to run''',)
parser.add_argument(
'''--gpus''',default=0,type=__lowerCamelCase,help='''The number of GPUs allocated for this, it is by default 0 meaning none''',)
parser.add_argument(
'''--overwrite_cache''',action='''store_true''',help='''Overwrite the cached training and evaluation sets''' )
return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
| 193 | 0 |
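# Usage sketch (my addition; the script name and most flags come from
# `add_generic_args`/`lightning_base`, which are not shown here, so treat them
# as assumptions rather than the exact CLI):
#   python run_pl_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#       --model_name_or_path bert-base-cased --output_dir ./results \
#       --do_train --do_predict --max_seq_length 128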
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class BlipImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
def UpperCAmelCase ( self :int , _lowercase :np.ndarray , _lowercase :Dict[str, int] , _lowercase :PILImageResampling = PILImageResampling.BICUBIC , _lowercase :Optional[Union[str, ChannelDimension]] = None , **_lowercase :Tuple , ):
'''simple docstring'''
lowercase__ = get_size_dict(_lowercase , default_to_square=_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
lowercase__ = (size["height"], size["width"])
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def UpperCAmelCase ( self :List[str] , _lowercase :np.ndarray , _lowercase :Union[int, float] , _lowercase :Optional[Union[str, ChannelDimension]] = None , **_lowercase :Tuple , ):
'''simple docstring'''
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def UpperCAmelCase ( self :Optional[Any] , _lowercase :np.ndarray , _lowercase :Union[float, List[float]] , _lowercase :Union[float, List[float]] , _lowercase :Optional[Union[str, ChannelDimension]] = None , **_lowercase :List[Any] , ):
'''simple docstring'''
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def UpperCAmelCase ( self :str , _lowercase :ImageInput , _lowercase :Optional[bool] = None , _lowercase :Optional[Dict[str, int]] = None , _lowercase :PILImageResampling = None , _lowercase :Optional[bool] = None , _lowercase :Optional[float] = None , _lowercase :Optional[bool] = None , _lowercase :Optional[Union[float, List[float]]] = None , _lowercase :Optional[Union[float, List[float]]] = None , _lowercase :Optional[Union[str, TensorType]] = None , _lowercase :bool = None , _lowercase :ChannelDimension = ChannelDimension.FIRST , **_lowercase :str , ):
'''simple docstring'''
lowercase__ = do_resize if do_resize is not None else self.do_resize
lowercase__ = resample if resample is not None else self.resample
lowercase__ = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ = do_normalize if do_normalize is not None else self.do_normalize
lowercase__ = image_mean if image_mean is not None else self.image_mean
lowercase__ = image_std if image_std is not None else self.image_std
lowercase__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowercase__ = size if size is not None else self.size
lowercase__ = get_size_dict(_lowercase , default_to_square=_lowercase )
lowercase__ = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowercase__ = [convert_to_rgb(_lowercase ) for image in images]
# All transformations expect numpy arrays.
lowercase__ = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
lowercase__ = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_rescale:
lowercase__ = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
lowercase__ = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
lowercase__ = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
lowercase__ = BatchFeature(data={"pixel_values": images} , tensor_type=_lowercase )
return encoded_outputs
| 371 |
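# Usage sketch (my addition; it follows the standard transformers image-processor
# calling convention rather than anything shown in this file):
#   from PIL import Image
#   processor = BlipImageProcessor()
#   inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#   inputs["pixel_values"].shape  # -> torch.Size([1, 3, 384, 384])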
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    # Classic two-pointer scan; assumes `nums` is sorted in ascending order.
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1

    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{two_pointer([2, 7, 11, 15], 9) = }""")
| 201 | 0 |
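# Aside (my addition): the two-pointer scan above needs `nums` sorted. For
# unsorted input, the usual alternative is a one-pass hash map, trading O(n)
# extra memory for order independence:
def two_sum_hashmap(nums: list[int], target: int) -> list[int]:
    seen: dict[int, int] = {}
    for idx, value in enumerate(nums):
        if target - value in seen:
            return [seen[target - value], idx]
        seen[value] = idx
    return []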
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 228 |
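# Usage sketch (my addition; the script filename and paths are placeholders):
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./funnel/model.ckpt \
#       --config_file ./funnel/config.json \
#       --pytorch_dump_path ./funnel/pytorch_model.bin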
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
"""simple docstring"""
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400,
                 do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True,
                 image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_reduce_labels=False):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    """simple docstring"""
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])
    return image, map


def prepare_semantic_batch_inputs():
    """simple docstring"""
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
"""simple docstring"""
    image_processing_class = BeitImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ) -> Optional[int]:
A_ : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """center_crop""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """image_std""" ) )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 20, """width""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
self.assertEqual(image_processor.do_reduce_labels , _lowerCamelCase )
A_ : int = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_lowerCamelCase )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
self.assertEqual(image_processor.do_reduce_labels , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase_ ( self ) -> Dict:
# Initialize image_processing
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
A_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ : int = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase_ ( self ) -> List[str]:
# Initialize image_processing
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input
A_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ : List[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase_ ( self ) -> str:
# Initialize image_processing
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
A_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ : Union[str, Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase_ ( self ) -> Optional[int]:
# Initialize image_processing
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
A_ : Optional[int] = []
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
A_ : Union[str, Any] = image_processing(image_inputs[0] , maps[0] , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched
A_ : Optional[Any] = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test not batched input (PIL images)
A_ , A_ : List[Any] = prepare_semantic_single_inputs()
A_ : Union[str, Any] = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched input (PIL images)
A_ , A_ : str = prepare_semantic_batch_inputs()
A_ : Any = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
2,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
def UpperCAmelCase_ ( self ) -> Tuple:
# Initialize image_processing
A_ : Any = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
A_ , A_ : Tuple = prepare_semantic_single_inputs()
A_ : str = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 150 )
A_ : str = True
A_ : Union[str, Any] = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
| 344 | 0 |
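# Aside (my addition): the `do_reduce_labels` assertions above encode the
# ADE20k convention: background (label 0) becomes the ignore index 255 and
# every other label shifts down by one. A minimal numpy sketch of that
# transform, mirroring the order of operations the assertions imply:
import numpy as np

def reduce_labels(label: np.ndarray) -> np.ndarray:
    label = label.astype(np.int64)
    label[label == 0] = 255    # background -> ignore index
    label = label - 1          # shift remaining classes down by one
    label[label == 254] = 255  # keep the ignore index stable
    return label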
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"distilbert-base-uncased": 5_12,
"distilbert-base-uncased-distilled-squad": 5_12,
"distilbert-base-cased": 5_12,
"distilbert-base-cased-distilled-squad": 5_12,
"distilbert-base-german-cased": 5_12,
"distilbert-base-multilingual-cased": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 370 |
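# Usage sketch (my addition):
#   tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#   ids = tok("hello world")["input_ids"]
#   tok.convert_ids_to_tokens(ids)  # -> ['[CLS]', 'hello', 'world', '[SEP]']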
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires it
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f'Conversion successful. Model saved under {pytorch_dump_folder_path}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 66 | 0 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
_lowerCamelCase : Tuple = """pytorch_model.bin"""
_lowerCamelCase : List[str] = """pytorch_model.bin.index.json"""
_lowerCamelCase : Union[str, Any] = """adapter_config.json"""
_lowerCamelCase : Dict = """adapter_model.bin"""
_lowerCamelCase : str = """adapter_model.safetensors"""
_lowerCamelCase : List[str] = """tf_model.h5"""
_lowerCamelCase : List[Any] = """tf_model.h5.index.json"""
_lowerCamelCase : Dict = """model.ckpt"""
_lowerCamelCase : Union[str, Any] = """flax_model.msgpack"""
_lowerCamelCase : Optional[Any] = """flax_model.msgpack.index.json"""
_lowerCamelCase : int = """model.safetensors"""
_lowerCamelCase : Any = """model.safetensors.index.json"""
_lowerCamelCase : List[str] = """config.json"""
_lowerCamelCase : Dict = """preprocessor_config.json"""
_lowerCamelCase : List[Any] = FEATURE_EXTRACTOR_NAME
_lowerCamelCase : Tuple = """generation_config.json"""
_lowerCamelCase : Any = """modelcard.json"""
_lowerCamelCase : Tuple = """▁"""
_lowerCamelCase : Optional[Any] = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
_lowerCamelCase : Optional[int] = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
_lowerCamelCase : Optional[int] = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
_lowerCamelCase : List[str] = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    """simple docstring"""
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
| 14 |
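# Usage sketch (my addition): example scripts call this guard before anything
# else, e.g.
#   from transformers.utils import check_min_version
#   check_min_version("4.27.0.dev0")  # raises ImportError on an older install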
import requests
from bs4 import BeautifulSoup
def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count string for the first Google Scholar result."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"""title""": (
"""Precisely geometry controlled microsupercapacitors for ultrahigh areal """
"""capacitance, volumetric capacitance, and energy density"""
),
"""journal""": """Chem. Mater.""",
"""volume""": 30,
"""pages""": """3979-3990""",
"""year""": 2018,
"""hl""": """en""",
}
print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params))
| 14 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 364 |
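# Usage sketch (my addition; I read the class above as the AltCLIP processor,
# so the checkpoint name is an assumption):
#   processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   # -> dict with input_ids / attention_mask / pixel_values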
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
def __init__( self ,A__ ,A__=9_9 ,A__=1_3 ,A__=1_6 ,A__=7 ,A__=True ,A__=True ,A__=True ,A__=False ,A__=True ,A__=2 ,A__=3_2 ,A__=4 ,A__=4 ,A__=3_0 ,A__=0 ,A__=1 ,A__=2 ,A__=None ,):
lowercase = parent
lowercase = batch_size
lowercase = decoder_seq_length
# For common tests
lowercase = self.decoder_seq_length
lowercase = is_training
lowercase = use_attention_mask
lowercase = use_labels
lowercase = vocab_size
lowercase = d_model
lowercase = d_model
lowercase = decoder_layers
lowercase = decoder_layers
lowercase = decoder_ffn_dim
lowercase = decoder_attention_heads
lowercase = decoder_attention_heads
lowercase = eos_token_id
lowercase = bos_token_id
lowercase = pad_token_id
lowercase = decoder_start_token_id
lowercase = use_cache
lowercase = max_position_embeddings
lowercase = None
lowercase = decoder_seq_length
lowercase = 2
lowercase = 1
def A__ ( self):
lowercase = ids_tensor([self.batch_size, self.decoder_seq_length] ,self.vocab_size)
lowercase = None
if self.use_attention_mask:
lowercase = ids_tensor([self.batch_size, self.decoder_seq_length] ,vocab_size=2)
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size, self.decoder_seq_length] ,self.vocab_size)
lowercase = TrOCRConfig(
vocab_size=self.vocab_size ,d_model=self.d_model ,decoder_layers=self.decoder_layers ,decoder_ffn_dim=self.decoder_ffn_dim ,decoder_attention_heads=self.decoder_attention_heads ,eos_token_id=self.eos_token_id ,bos_token_id=self.bos_token_id ,use_cache=self.use_cache ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,max_position_embeddings=self.max_position_embeddings ,)
return (config, input_ids, attention_mask, lm_labels)
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,):
lowercase = True
lowercase = TrOCRDecoder(config=A__).to(A__).eval()
lowercase = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
lowercase = model(A__ ,use_cache=A__)
lowercase = model(A__)
lowercase = model(A__ ,use_cache=A__)
self.parent.assertTrue(len(A__) == len(A__))
self.parent.assertTrue(len(A__) == len(A__) + 1)
lowercase = outputs['''past_key_values''']
# create hypothetical next token and extent to next_input_ids
lowercase = ids_tensor((2, 1) ,config.vocab_size - 1) + 1
# append to next input_ids and
lowercase = torch.cat([input_ids, next_tokens] ,dim=-1)
lowercase = model(A__)['''last_hidden_state''']
lowercase = model(A__ ,past_key_values=A__)['''last_hidden_state''']
# select random slice
lowercase = ids_tensor((1,) ,output_from_past.shape[-1]).item()
lowercase = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
lowercase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(A__ ,A__ ,atol=1E-3)
def A__ ( self):
lowercase = self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase = config_and_inputs
lowercase = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
lowercase_ : Any =(TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowercase_ : Dict =(TrOCRForCausalLM,) if is_torch_available() else ()
lowercase_ : int ={'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
lowercase_ : List[Any] =True
lowercase_ : int =False
    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)
def A__ ( self):
pass
def A__ ( self):
pass
def A__ ( self):
pass
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)
def A__ ( self):
return
@unittest.skip('''The model doesn\'t support left padding''') # and it's not used enough to be worth fixing :)
def A__ ( self):
pass
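# Note on the cached-decoding check above: `create_and_check_decoder_model_past` runs one
# forward pass with `use_cache=True`, then feeds only the newly sampled token together with
# the returned `past_key_values`, and asserts that a random slice of that output matches a
# full forward pass over the extended sequence (within atol=1e-3).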
| 97 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images (random RGB noise) to feed the image processor."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 326 |
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
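# Illustrative call (the repo id and file path below are made-up examples, not from this module):
# hf_hub_url("user/my-dataset", "data/train file.csv", revision="main")
# With huggingface_hub >= 0.11.0 the path is url-encoded by the library itself; on older
# versions the quote() branch above encodes the space before building the URL.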
| 254 | 0 |
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechT5FeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
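# `floats_list` returns a nested list of random floats with the given 2-D shape, e.g.
# floats_list((2, 3)) -> [[0.12..., 0.87..., 0.41...], [0.55..., 0.09..., 0.73...]]
# (values shown are illustrative; the actual numbers depend on `global_rng`).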
@require_torch
class SpeechT5FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1,
        padding_value=0.0, sampling_rate=16000, do_normalize=True, num_mel_bins=80, hop_length=16,
        win_length=64, win_function="hann_window", fmin=80, fmax=7600, mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
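# The two helpers above mirror each other: `prepare_inputs_for_common` builds raw waveform
# lists (`feature_size` values per frame), while `prepare_inputs_for_target` builds log-mel
# style targets with `num_mel_bins` values per frame; both grow the inputs by
# `seq_length_diff` unless `equal_length=True`.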
@require_torch
class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechT5FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechT5FeatureExtractionTester(self)
    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
def __magic_name__ ( self : str ):
UpperCAmelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase : Dict = [floats_list((1, x) )[0] for x in range(8_0_0, 1_4_0_0, 2_0_0 )]
UpperCAmelCase : Optional[int] = ['''longest''', '''max_length''', '''do_not_pad''']
UpperCAmelCase : str = [None, 1_6_0_0, None]
for max_length, padding in zip(__A, __A ):
UpperCAmelCase : Tuple = feat_extract(__A, padding=__A, max_length=__A, return_tensors='''np''' )
UpperCAmelCase : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self.assertTrue(input_values[0][8_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self.assertTrue(input_values[0][1_0_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase : List[str] = range(8_0_0, 1_4_0_0, 2_0_0 )
UpperCAmelCase : str = [floats_list((1, x) )[0] for x in lengths]
UpperCAmelCase : List[Any] = ['''longest''', '''max_length''', '''do_not_pad''']
UpperCAmelCase : str = [None, 1_6_0_0, None]
for max_length, padding in zip(__A, __A ):
UpperCAmelCase : List[Any] = feat_extract(__A, max_length=__A, padding=__A )
UpperCAmelCase : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase : str = [floats_list((1, x) )[0] for x in range(8_0_0, 1_4_0_0, 2_0_0 )]
UpperCAmelCase : Union[str, Any] = feat_extract(
__A, truncation=__A, max_length=1_0_0_0, padding='''max_length''', return_tensors='''np''' )
UpperCAmelCase : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def __magic_name__ ( self : Tuple ):
UpperCAmelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase : Any = [floats_list((1, x) )[0] for x in range(8_0_0, 1_4_0_0, 2_0_0 )]
UpperCAmelCase : Tuple = feat_extract(
__A, truncation=__A, max_length=1_0_0_0, padding='''longest''', return_tensors='''np''' )
UpperCAmelCase : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_0_0_0) )
UpperCAmelCase : Any = [floats_list((1, x) )[0] for x in range(8_0_0, 1_4_0_0, 2_0_0 )]
UpperCAmelCase : str = feat_extract(
__A, truncation=__A, max_length=2_0_0_0, padding='''longest''', return_tensors='''np''' )
UpperCAmelCase : str = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_2_0_0) )
    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
def __magic_name__ ( self : Optional[Any] ):
# Tests that all call wrap to encode_plus and batch_encode_plus
UpperCAmelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(8_0_0, 1_4_0_0, 2_0_0 )]
UpperCAmelCase : List[str] = [np.asarray(__A ) for speech_input in speech_inputs]
# Test feature size
UpperCAmelCase : Tuple = feature_extractor(audio_target=__A, padding=__A, return_tensors='''np''' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
UpperCAmelCase : Dict = feature_extractor(speech_inputs[0], return_tensors='''np''' ).input_values
UpperCAmelCase : List[Any] = feature_extractor(np_speech_inputs[0], return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(__A, __A, atol=1E-3 ) )
# Test batched
UpperCAmelCase : Optional[Any] = feature_extractor(__A, return_tensors='''np''' ).input_values
UpperCAmelCase : Dict = feature_extractor(__A, return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(__A, __A ):
self.assertTrue(np.allclose(__A, __A, atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
UpperCAmelCase : Optional[Any] = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
UpperCAmelCase : Any = np.asarray(__A )
UpperCAmelCase : Dict = feature_extractor(__A, return_tensors='''np''' ).input_values
UpperCAmelCase : Tuple = feature_extractor(__A, return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(__A, __A ):
self.assertTrue(np.allclose(__A, __A, atol=1E-3 ) )
def __magic_name__ ( self : Dict ):
UpperCAmelCase : List[Any] = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase : Any = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase : Optional[int] = feat_extract.model_input_names[0]
UpperCAmelCase : Dict = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(__A ) == len(__A ) for x, y in zip(__A, processed_features[input_name] ) ) )
UpperCAmelCase : Any = self.feat_extract_tester.prepare_inputs_for_target(equal_length=__A )
UpperCAmelCase : Union[str, Any] = BatchFeature({input_name: speech_inputs}, tensor_type='''np''' )
UpperCAmelCase : Dict = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase : Optional[int] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __magic_name__ ( self : Tuple ):
UpperCAmelCase : Tuple = self.feat_extract_tester.prepare_inputs_for_target(equal_length=__A )
UpperCAmelCase : Dict = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase : List[str] = feat_extract.model_input_names[0]
UpperCAmelCase : Dict = BatchFeature({input_name: speech_inputs}, tensor_type='''pt''' )
UpperCAmelCase : Optional[Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase : Optional[int] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase : str = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase : Any = feat_extract.model_input_names[0]
UpperCAmelCase : List[Any] = BatchFeature({input_name: speech_inputs} )
        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!
UpperCAmelCase : List[str] = feat_extract.pad(__A, padding='''longest''', return_tensors='''np''' )[input_name]
UpperCAmelCase : Dict = feat_extract.pad(__A, padding='''longest''', return_tensors='''pt''' )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
def __magic_name__ ( self : int ):
UpperCAmelCase : List[str] = self.feat_extract_dict
UpperCAmelCase : Any = True
UpperCAmelCase : Dict = self.feature_extraction_class(**__A )
UpperCAmelCase : Tuple = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase : Any = [len(__A ) for x in speech_inputs]
UpperCAmelCase : Tuple = feat_extract.model_input_names[0]
UpperCAmelCase : Optional[int] = BatchFeature({input_name: speech_inputs} )
        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!
UpperCAmelCase : str = feat_extract.pad(__A, padding='''longest''', return_tensors='''np''' )
self.assertIn('''attention_mask''', __A )
self.assertListEqual(list(processed.attention_mask.shape ), list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist(), __A )
def __magic_name__ ( self : Tuple ):
UpperCAmelCase : Dict = self.feat_extract_dict
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : Optional[int] = self.feature_extraction_class(**__A )
UpperCAmelCase : Any = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase : Union[str, Any] = [len(__A ) for x in speech_inputs]
UpperCAmelCase : str = feat_extract.model_input_names[0]
UpperCAmelCase : Tuple = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase : Optional[Any] = min(__A )
        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!
UpperCAmelCase : str = feat_extract.pad(
__A, padding='''max_length''', max_length=__A, truncation=__A, return_tensors='''np''' )
self.assertIn('''attention_mask''', __A )
self.assertListEqual(
list(processed_pad.attention_mask.shape ), [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist(), [max_length for x in speech_inputs] )
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
[2.3804E-03, 2.0752E-03, 1.9836E-03, 2.1057E-03, 1.6174E-03,
3.0518E-04, 9.1553E-05, 3.3569E-04, 9.7656E-04, 1.8311E-03,
2.0142E-03, 2.1057E-03, 1.7395E-03, 4.5776E-04, -3.9673E-04,
4.5776E-04, 1.0071E-03, 9.1553E-05, 4.8828E-04, 1.1597E-03,
7.3242E-04, 9.4604E-04, 1.8005E-03, 1.8311E-03, 8.8501E-04,
4.2725E-04, 4.8828E-04, 7.3242E-04, 1.0986E-03, 2.1057E-03] )
# fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))
    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
[-2.6_8_7_0, -3.0_1_0_4, -3.1_3_5_6, -3.5_3_5_2, -3.0_0_4_4, -3.0_3_5_3, -3.4_7_1_9, -3.6_7_7_7,
-3.1_5_2_0, -2.9_4_3_5, -2.6_5_5_3, -2.8_7_9_5, -2.9_9_4_4, -2.5_9_2_1, -3.0_2_7_9, -3.0_3_8_6,
-3.0_8_6_4, -3.1_2_9_1, -3.2_3_5_3, -2.7_4_4_4, -2.6_8_3_1, -2.7_2_8_7, -3.1_7_6_1, -3.1_5_7_1,
-3.2_7_2_6, -3.0_5_8_2, -3.1_0_0_7, -3.4_5_3_3, -3.4_6_9_5, -3.0_9_9_8] )
# fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 366 |
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out
    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
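# Minimal usage sketch (all sizes below are illustrative assumptions, not values from this file):
# crit = ProjectedAdaptiveLogSoftmax(n_token=10000, d_embed=512, d_proj=512, cutoffs=[1000, 4000])
# hidden = torch.randn(16, 512)            # (tokens, d_proj)
# labels = torch.randint(0, 10000, (16,))
# nll = crit(hidden, labels)               # per-token negative log-likelihood
# log_probs = crit.log_prob(hidden)        # (tokens, n_token) full log-probabilities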
| 99 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
"tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self, vocab_size=65024, hidden_size=4544, num_hidden_layers=32, num_attention_heads=71,
        layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, hidden_dropout=0.0,
        attention_dropout=0.0, num_kv_heads=None, alibi=False, new_decoder_architecture=False,
        multi_query=True, parallel_attn=True, bias=False, bos_token_id=11, eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
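# Minimal usage sketch (tiny illustrative values, not a real Falcon size):
# config = FalconConfig(vocab_size=1024, hidden_size=64, num_hidden_layers=2, num_attention_heads=4)
# config.head_dim  # -> 16 (hidden_size // num_attention_heads)
# config.rotary    # -> True whenever `alibi` is False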
| 101 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        """Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt"""
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
| 175 | 0 |
"""simple docstring"""
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"
@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
| 181 |
"""simple docstring"""
import json
import os
import unittest
from typing import Tuple
from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        vocab = (
'<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '
'ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '
'ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '
'oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '
'pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '
'yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '
'əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '
'ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '
'ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '
'uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '
'ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '
'ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '
'ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'
        ).split(" ")
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def UpperCAmelCase__ ( self :int ) -> str:
UpperCAmelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
# check adding a single token
tokenizer.add_tokens('xxx' )
UpperCAmelCase = tokenizer('m xxx ɪ' , do_phonemize=lowercase_ ).input_ids
self.assertEqual(lowercase_ , [13, 3_92, 17] ) # xxx should be last token
tokenizer.add_tokens(['aaa', 'bbb', 'ccc'] )
UpperCAmelCase = tokenizer('m aaa ɪ ccc' , do_phonemize=lowercase_ ).input_ids
self.assertEqual(lowercase_ , [13, 3_93, 17, 3_95] ) # aaa and ccc should be after xxx and 2 after aaa
UpperCAmelCase = tokenizer('maɪ c' , do_phonemize=lowercase_ ).input_ids
self.assertEqual(lowercase_ , [3, 2_00] ) # mai should be <unk> (=3)
def UpperCAmelCase__ ( self :Tuple ) -> Union[str, Any]:
UpperCAmelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
UpperCAmelCase = 'Hello how are you'
UpperCAmelCase = tokenizer.phonemize(lowercase_ , phonemizer_lang='en-us' )
self.assertEqual(lowercase_ , 'h ə l oʊ h aʊ ɑːɹ j uː' )
def UpperCAmelCase__ ( self :Dict ) -> int:
UpperCAmelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
UpperCAmelCase = 'Hello how are you'
UpperCAmelCase = tokenizer.phonemize(lowercase_ , phonemizer_lang='en-us' )
self.assertEqual(tokenizer(lowercase_ ).input_ids , tokenizer(lowercase_ , do_phonemize=lowercase_ ).input_ids )
def UpperCAmelCase__ ( self :Optional[Any] ) -> Dict:
UpperCAmelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
UpperCAmelCase = 'Hello how are you'
UpperCAmelCase = tokenizer.phonemize(lowercase_ , phonemizer_lang='en-us' )
UpperCAmelCase = tokenizer.decode(tokenizer(lowercase_ ).input_ids )
self.assertEqual(lowercase_ , lowercase_ )
def UpperCAmelCase__ ( self :Optional[int] ) -> str:
UpperCAmelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
UpperCAmelCase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
UpperCAmelCase = tokenizer.decode(sample_ids[0] )
UpperCAmelCase = tokenizer.batch_decode(lowercase_ )
self.assertEqual(lowercase_ , batch_tokens[0] )
self.assertEqual(lowercase_ , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] )
def UpperCAmelCase__ ( self :Any ) -> str:
UpperCAmelCase = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
UpperCAmelCase = 'Hello how are you'
UpperCAmelCase = tokenizer.phonemize(lowercase_ , phonemizer_lang='en-us' )
self.assertEqual(lowercase_ , 'h ə l oʊ | h aʊ | ɑːɹ | j uː |' )
def UpperCAmelCase__ ( self :Any ) -> Any:
UpperCAmelCase = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
UpperCAmelCase = 'Hello how are you'
UpperCAmelCase = tokenizer.phonemize(lowercase_ , phonemizer_lang='en-us' )
self.assertEqual(tokenizer(lowercase_ ).input_ids , tokenizer(lowercase_ , do_phonemize=lowercase_ ).input_ids )
def UpperCAmelCase__ ( self :Dict ) -> Union[str, Any]:
UpperCAmelCase = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
# fmt: off
UpperCAmelCase = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
UpperCAmelCase = tokenizer.decode(sample_ids[0] )
UpperCAmelCase = tokenizer.batch_decode(lowercase_ )
self.assertEqual(lowercase_ , batch_tokens[0] )
self.assertEqual(lowercase_ , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] )
# decode with no word_del_token filter
UpperCAmelCase = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=lowercase_ )
UpperCAmelCase = tokenizer.batch_decode(lowercase_ , filter_word_delimiter_token=lowercase_ )
self.assertEqual(lowercase_ , batch_tokens[0] )
self.assertEqual(lowercase_ , ['k s ɾ | ɾ l | ɭʲ', '| j ð | s j ð s oːɹ'] )
def UpperCAmelCase__ ( self :int ) -> int:
UpperCAmelCase = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
UpperCAmelCase = 'Hello how are you'
UpperCAmelCase = tokenizer.phonemize(lowercase_ , phonemizer_lang='en-us' )
UpperCAmelCase = tokenizer.decode(tokenizer(lowercase_ ).input_ids , filter_word_delimiter_token=lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
def UpperCAmelCase__ ( self :Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
UpperCAmelCase = 'Hello how are you'
UpperCAmelCase = tokenizer.phonemize(lowercase_ , phonemizer_lang='en-us' )
UpperCAmelCase = tokenizer.decode(tokenizer(lowercase_ ).input_ids , filter_word_delimiter_token=lowercase_ )
self.assertEqual(' '.join([p.strip() for p in phonemes.split(' |' )] ).strip() , lowercase_ )
def UpperCAmelCase__ ( self :int ) -> Optional[Any]:
UpperCAmelCase = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token=lowercase_ )
UpperCAmelCase = 'Hello how are you'
UpperCAmelCase = tokenizer(lowercase_ , phonemizer_lang='en-us' ).input_ids
UpperCAmelCase = tokenizer(lowercase_ , phonemizer_lang='fr-fr' ).input_ids
self.assertNotEqual(lowercase_ , lowercase_ )
UpperCAmelCase = tokenizer.decode(lowercase_ )
UpperCAmelCase = tokenizer.decode(lowercase_ )
self.assertEqual(lowercase_ , 'h ə l oʊ h aʊ ɑːɹ j uː' )
self.assertEqual(lowercase_ , 'ɛ l o h aʊ a ʁ j u' )
def UpperCAmelCase__ ( self :int ) -> List[Any]:
UpperCAmelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
UpperCAmelCase = 'Hello how Are you'
UpperCAmelCase = 'hello how are you'
UpperCAmelCase = tokenizer(lowercase_ ).input_ids
UpperCAmelCase = tokenizer(lowercase_ ).input_ids
self.assertEqual(lowercase_ , lowercase_ )
def UpperCAmelCase__ ( self :Optional[Any] ) -> int:
UpperCAmelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
tokenizer.add_tokens(['!', '?'] )
tokenizer.add_special_tokens({'cls_token': '$$$'} )
# fmt: off
UpperCAmelCase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94],
]
# fmt: on
UpperCAmelCase = tokenizer.batch_decode(lowercase_ )
self.assertEqual(lowercase_ , ['k s ɾ ɾ l ɭʲ!?!? $$$', 'j ð s j ð s oːɹ $$$'] )
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def UpperCAmelCase__ ( self :str ) -> Optional[int]:
UpperCAmelCase = self.get_tokenizer(word_delimiter_token='|' )
tokenizer.add_tokens('|' )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
UpperCAmelCase = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
UpperCAmelCase = tokenizer.decode(lowercase_ , output_char_offsets=lowercase_ , filter_word_delimiter_token=lowercase_ )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue('text' in outputs )
self.assertTrue('char_offsets' in outputs )
self.assertTrue(isinstance(lowercase_ , lowercase_ ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(' '.join(self.get_from_offsets(outputs['char_offsets'] , 'char' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'char' ) , ['k', 's', 'ɾ', 'ɾ', '|', 'ɾ', 'l', '|', 'ɭʲ'] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'start_offset' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'end_offset' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
    def test_offsets_batch(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")

        def check_list_tuples_equal(outputs_batch, outputs_list):
            self.assertTrue(isinstance(outputs_batch, Wav2Vec2PhonemeCTCTokenizerOutput))
            self.assertTrue(isinstance(outputs_list[0], Wav2Vec2PhonemeCTCTokenizerOutput))

            # transform list to ModelOutput
            outputs_batch_2 = Wav2Vec2PhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]}
            )

            self.assertListEqual(outputs_batch["text"], outputs_batch_2["text"])

            def recursive_check(list_or_dict_1, list_or_dict_2):
                if isinstance(list_or_dict_1, list):
                    [recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)]
                self.assertEqual(list_or_dict_1, list_or_dict_2)

            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch["char_offsets"], outputs_batch_2["char_offsets"])
# fmt: off
UpperCAmelCase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
UpperCAmelCase = tokenizer.batch_decode(lowercase_ , output_char_offsets=lowercase_ )
UpperCAmelCase = [tokenizer.decode(lowercase_ , output_char_offsets=lowercase_ ) for ids in sample_ids]
check_list_tuples_equal(lowercase_ , lowercase_ )
@unittest.skip('Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes' )
def UpperCAmelCase__ ( self :Any ) -> str:
pass
@unittest.skip('Wav2Vec2PhonemeTokenizer always puts spaces between phonemes' )
def UpperCAmelCase__ ( self :str ) -> List[str]:
pass
@unittest.skip('encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency' )
def UpperCAmelCase__ ( self :List[str] ) -> int:
pass
@unittest.skip('Wav2Vec2PhonemeModel has no max model length => no testing' )
def UpperCAmelCase__ ( self :List[Any] ) -> Optional[int]:
pass
def UpperCAmelCase__ ( self :int ) -> Optional[Any]:
UpperCAmelCase = self.get_tokenizers(do_lower_case=lowercase_ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
UpperCAmelCase = tokenizer.vocab_size
UpperCAmelCase = len(lowercase_ )
self.assertNotEqual(lowercase_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
UpperCAmelCase = ['aaaaa bbbbbb', 'cccccccccdddddddd']
UpperCAmelCase = tokenizer.add_tokens(lowercase_ )
UpperCAmelCase = tokenizer.vocab_size
UpperCAmelCase = len(lowercase_ )
self.assertNotEqual(lowercase_ , 0 )
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , len(lowercase_ ) )
self.assertEqual(lowercase_ , all_size + len(lowercase_ ) )
UpperCAmelCase = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=lowercase_ )
self.assertGreaterEqual(len(lowercase_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
UpperCAmelCase = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
UpperCAmelCase = tokenizer.add_special_tokens(lowercase_ )
UpperCAmelCase = tokenizer.vocab_size
UpperCAmelCase = len(lowercase_ )
self.assertNotEqual(lowercase_ , 0 )
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , len(lowercase_ ) )
self.assertEqual(lowercase_ , all_size_a + len(lowercase_ ) )
UpperCAmelCase = tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=lowercase_ )
self.assertGreaterEqual(len(lowercase_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' )
def UpperCAmelCase__ ( self :Tuple ) -> Optional[Any]:
pass
@unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' )
def UpperCAmelCase__ ( self :int ) -> Any:
pass
def UpperCAmelCase__ ( self :Tuple ) -> Dict:
# The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
# is not the case for Wav2Vec2PhonemeCTCTokenizer.
UpperCAmelCase = self.get_tokenizers(fast=lowercase_ , do_lower_case=lowercase_ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
UpperCAmelCase = ['ð', 'ɪ', 's', 'ɪ', 'z', 'ɐ', 't', 'ɛ', 'k', 's', 't']
UpperCAmelCase = tokenizer.convert_tokens_to_string(lowercase_ )
self.assertIsInstance(output['text'] , lowercase_ )
| 181 | 1 |
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    # Return True if there is a node that has not been visited yet.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def mincut(graph, source, sink):
    parent = [-1] * (len(graph))  # This array is filled by BFS to store the augmenting path.
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))

    return res
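# `bfs`/`mincut` together implement the Ford-Fulkerson max-flow routine with BFS path search
# (i.e. Edmonds-Karp); `mincut` returns the edges that become saturated in the residual graph,
# which form the cut separating `source` from `sink`. The `__main__` block below runs it on
# `test_graph`.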
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 300 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts
    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)
        return images, has_nsfw_concepts
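# Editorial usage sketch (not part of the original module). The checker is
# normally loaded from the `safety_checker` subfolder of a Stable Diffusion
# checkpoint; the repo id below is an assumption for illustration, and the
# random tensors merely stand in for processed CLIP pixel values and decoded images.
if __name__ == "__main__":
    checker = StableDiffusionSafetyChecker.from_pretrained(
        "CompVis/stable-diffusion-v1-4", subfolder="safety_checker"
    )
    clip_input = torch.randn(1, 3, 224, 224)
    images = torch.rand(1, 512, 512, 3)
    images, has_nsfw = checker(clip_input, images)
    print(has_nsfw)  # e.g. [False]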
| 300 | 1 |
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def main() -> None:
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/transformers')
    open_issues = repo.get_issues(state='open')

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='closed' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
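# Editorial sketch (not part of the original script): the stale-marking
# condition from the loop above, factored out so it can be dry-run against a
# fake issue object; `now` stands in for dt.utcnow().
def _would_mark_stale(issue, now) -> bool:
    too_quiet = (now - issue.updated_at).days > 23
    old_enough = (now - issue.created_at).days >= 30
    exempt = any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
    return too_quiet and old_enough and not exempt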
if __name__ == "__main__":
    main()
| 369 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self, vocab_size=10_000, encoder_layers=12, encoder_ffn_dim=2_048, encoder_attention_heads=4,
        decoder_layers=6, decoder_ffn_dim=2_048, decoder_attention_heads=4, encoder_layerdrop=0.0,
        decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="relu",
        d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02,
        decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2,
        max_source_positions=6_000, max_target_positions=1_024, num_conv_layers=2, conv_kernel_sizes=(5, 5),
        conv_channels=1_024, input_feat_per_channel=80, input_channels=1, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
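# Editorial usage sketch (not part of the original module): the config can be
# instantiated directly, with any of the arguments above passed as overrides.
if __name__ == "__main__":
    config = Speech2TextConfig(encoder_layers=6, decoder_layers=3)
    print(config.num_hidden_layers)  # 6 -- mirrors encoder_layers
    print(config.conv_kernel_sizes)  # [5, 5]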
| 210 | 0 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2", revision="bf16", dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, revision="bf16", dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 295 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 5_1_2,
'''facebook/dpr-ctx_encoder-multiset-base''': 5_1_2,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-question_encoder-single-nq-base''': 5_1_2,
'''facebook/dpr-question_encoder-multiset-base''': 5_1_2,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-reader-single-nq-base''': 5_1_2,
'''facebook/dpr-reader-multiset-base''': 5_1_2,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = R'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
        - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
          if provided).
        - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
          acceptable input length for the model if that argument is not provided.
        - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
          lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
        - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
          the maximum acceptable input length for the model if that argument is not provided. This will truncate
          token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
          of pairs) is provided.
        - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
          acceptable input length for the model if that argument is not provided. This will only truncate the first
          sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
        - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
          acceptable input length for the model if that argument is not provided. This will only truncate the
          second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
        - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
          greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
        - `'tf'`: Return TensorFlow `tf.constant` objects.
        - `'pt'`: Return PyTorch `torch.Tensor` objects.
        - `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length,
                return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length,
                return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(self, start_logits, end_logits, max_answer_length, top_spans):
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
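# Editorial usage sketch (not part of the original module), illustrating the
# [CLS] <question> [SEP] <title> [SEP] <text> layout described in
# CUSTOM_DPR_READER_DOCSTRING:
if __name__ == "__main__":
    tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
    encoded_inputs = tokenizer(
        questions=["What is love?"],
        titles=["Haddaway"],
        texts=["'What Is Love' is a song recorded by the artist Haddaway"],
        return_tensors="pt",
    )
    print(encoded_inputs["input_ids"].shape)  # (n_passages, sequence_length)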
| 295 | 1 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self, parent, batch_size=2, image_size=32, patch_size=16, num_channels=3, is_training=True,
        use_labels=True, hidden_size=32, num_hidden_layers=4, backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24], is_hybrid=True, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }
        return DPTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size, hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False, initializer_range=self.initializer_range, is_hybrid=self.is_hybrid,
            backbone_config=backbone_config, backbone_featmap_shape=self.backbone_featmap_shape,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
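# Editorial post-processing sketch (not part of the original tests): DPT depth
# maps are usually interpolated back to the input resolution; the target size
# below is an arbitrary example.
def resize_depth(predicted_depth, size=(480, 640)):
    return torch.nn.functional.interpolate(
        predicted_depth.unsqueeze(1), size=size, mode="bicubic", align_corners=False
    ).squeeze(1)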
| 122 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)
OPTS = None


def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
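def _demo_metrics():
    # Editorial sketch (not part of the official script): the two metrics above
    # on tiny examples, as a sanity check.
    print(compute_exact("The cat sat on the mat", "cat sat on mat"))  # 1 -- articles are stripped
    print(round(compute_f1("The cat sat on the mat", "cat on mat"), 3))  # 0.857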
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh


def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
| 122 | 1 |
class SubArray:
    def __init__(self, arr):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(",")

    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
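# Editorial usage sketch (not part of the original script), bypassing input():
# the classic Kadane example below has maximum sub-array sum 4 + (-1) + 2 + 1 = 6.
def _demo_sub_array():
    assert SubArray("-2,1,-3,4,-1,2,1,-5,4").solve_sub_array() == 6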
| 11 |
from __future__ import annotations


def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
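# Editorial usage sketch (not part of the original module): the classic
# instance with values (60, 100, 120), weights (10, 20, 30) and capacity 50
# yields 240.0 by taking items 0 and 1 whole plus 2/3 of item 2.
def _demo_fractional_knapsack():
    max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    assert max_value == 240.0
    assert fractions == [1, 1, 2 / 3]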
| 11 | 1 |
def stooge_sort(arr):
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr, i, h):
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)
        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)
        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
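# Editorial usage sketch (not part of the original script), bypassing input():
def _demo_stooge_sort():
    assert stooge_sort([18, 1, 0, -7, -1, 2, 2]) == [-7, -1, 0, 1, 2, 2, 18]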
| 350 |
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    queue: list[list] = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1

        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 107 | 0 |
from __future__ import annotations

from collections.abc import Iterator
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self):
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self):
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def test_circular_linked_list() -> None:
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 67 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."

        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree),
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 107 | 0 |
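As a quick numerical check of the class above (a sketch, reusing the names as reconstructed): for a degree-1 curve the two Bernstein basis values are 1 - t and t, so they always sum to 1 and the curve is plain linear interpolation between the control points:

curve = BezierCurve([(1, 2), (3, 5)])    # degree 1
print(curve.basis_function(0.25))        # [0.75, 0.25] -- sums to 1
print(curve.bezier_curve_function(0.5))  # (2.0, 3.5), the segment midpoint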
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs by adding the <s> and </s> special tokens around one or two sequences."""
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 at sequence-token positions."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token type ids: 0 for the first sequence (and its special tokens), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 360 |
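To make the three helpers above concrete without downloading a checkpoint, here is the layout they produce, using hypothetical token ids (cls=0, sep=2) and dummy sequences:

cls, sep = [0], [2]
seq_a, seq_b = [11, 12], [21]

print(cls + seq_a + sep)                # [0, 11, 12, 2]        single sequence
print(cls + seq_a + sep + seq_b + sep)  # [0, 11, 12, 2, 21, 2] sequence pair
print([1] + [0] * len(seq_a) + [1])     # special-tokens mask for the single case
print(len(cls + seq_a + sep) * [0] + len(seq_b + sep) * [1])  # token type ids: [0, 0, 0, 0, 1, 1]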
import tempfile
import unittest

import numpy as np

import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )
    from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel

if is_torch_available():
    import torch


class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )

    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(
            ["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True
        )

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)

    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 102 | 0 |
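The two cache checks above hinge on some easy-to-miss index bookkeeping; the standalone sketch below isolates just that part (shapes chosen arbitrarily, and only jax.numpy is assumed):

import jax.numpy as jnp

batch_size, seq_length, max_decoder_length = 2, 7, 20

# Positions for the prefix pass over input_ids[:, :-1] ...
position_ids = jnp.broadcast_to(jnp.arange(seq_length - 1)[None, :], (batch_size, seq_length - 1))
# ... and the single position fed together with the final token.
next_position = jnp.array(batch_size * [[seq_length - 1]], dtype="i4")

# The attention mask is padded out to the full cache length.
attention_mask = jnp.ones((batch_size, seq_length), dtype="i4")
extended_mask = jnp.concatenate(
    [attention_mask, jnp.zeros((batch_size, max_decoder_length - seq_length), dtype="i4")], axis=-1
)
print(position_ids.shape, next_position.shape, extended_mask.shape)  # (2, 6) (2, 1) (2, 20)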
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
    "uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
    "uclanlp/visualbert-vqa-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
    "uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
    "uclanlp/visualbert-vcr-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
    ),
    # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}


class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
| 26 |
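Since this is a plain PretrainedConfig subclass, trying it out requires nothing beyond a transformers install that ships VisualBERT (a sketch, not tied to any checkpoint):

from transformers import VisualBertConfig

config = VisualBertConfig(visual_embedding_dim=1024)
print(config.model_type)            # visual_bert
print(config.visual_embedding_dim)  # 1024, overriding the 512 default above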
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline


parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_states = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_states)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
| 283 | 0 |
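For reference, if the script above were saved as sd_ipex.py (a hypothetical filename) with model_id pointed at a real fine-tuned pipeline, a typical invocation would be:

python sd_ipex.py --dpm --steps 20

where --dpm swaps in the DPMSolver scheduler and --steps fills num_inference_steps, matching the argparse flags defined at the top.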
"""simple docstring"""
from typing import List
import numpy as np
def lowerCamelCase_ (UpperCamelCase__ : dict ):
_UpperCAmelCase : Optional[Any] = {key: len(UpperCamelCase__ ) for key, value in gen_kwargs.items() if isinstance(UpperCamelCase__ , UpperCamelCase__ )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
'''Sharding is ambiguous for this dataset: '''
+ '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'''
+ '''\n'''.join(F'\t- key {key} has length {length}' for key, length in lists_lengths.items() )
+ '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '''
+ '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'''
) )
_UpperCAmelCase : Any = max(lists_lengths.values() , default=0 )
return max(1 , UpperCamelCase__ )
def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int ):
_UpperCAmelCase : Optional[int] = []
for group_idx in range(UpperCamelCase__ ):
_UpperCAmelCase : Optional[int] = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
_UpperCAmelCase : List[Any] = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
_UpperCAmelCase : Dict = range(UpperCamelCase__ , start + num_shards_to_add )
shards_indices_per_group.append(UpperCamelCase__ )
return shards_indices_per_group
def lowerCamelCase_ (UpperCamelCase__ : dict , UpperCamelCase__ : int ):
_UpperCAmelCase : str = _number_of_shards_in_gen_kwargs(UpperCamelCase__ )
if num_shards == 1:
return [dict(UpperCamelCase__ )]
else:
_UpperCAmelCase : Dict = _distribute_shards(num_shards=UpperCamelCase__ , max_num_jobs=UpperCamelCase__ )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(UpperCamelCase__ , UpperCamelCase__ )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(UpperCamelCase__ ) )
]
def lowerCamelCase_ (UpperCamelCase__ : List[dict] ):
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , UpperCamelCase__ )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def lowerCamelCase_ (UpperCamelCase__ : np.random.Generator , UpperCamelCase__ : dict ):
_UpperCAmelCase : List[str] = {len(UpperCamelCase__ ) for value in gen_kwargs.values() if isinstance(UpperCamelCase__ , UpperCamelCase__ )}
_UpperCAmelCase : List[str] = {}
for size in list_sizes:
_UpperCAmelCase : Tuple = list(range(UpperCamelCase__ ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
_UpperCAmelCase : int = dict(UpperCamelCase__ )
for key, value in shuffled_kwargs.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
_UpperCAmelCase : Optional[int] = [value[i] for i in indices_per_size[len(UpperCamelCase__ )]]
return shuffled_kwargs
| 360 |
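A worked example of the distribution logic above (reusing the functions as named here): 5 shards over at most 3 jobs gives group sizes of 5 // 3 = 1 each, plus one extra shard for the first 5 % 3 = 2 groups:

print(_distribute_shards(num_shards=5, max_num_jobs=3))
# [range(0, 2), range(2, 4), range(4, 5)]

gen_kwargs = {"files": ["a", "b", "c", "d", "e"], "seed": 42}
print(_split_gen_kwargs(gen_kwargs, max_num_jobs=3))
# [{'files': ['a', 'b'], 'seed': 42}, {'files': ['c', 'd'], 'seed': 42}, {'files': ['e'], 'seed': 42}]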
"""simple docstring"""
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def lowerCamelCase_ ():
_UpperCAmelCase : Optional[int] = [randint(-1000 , 1000 ) for i in range(10 )]
_UpperCAmelCase : int = randint(-5000 , 5000 )
return (arr, r)
_lowerCAmelCase :Optional[Any] = make_dataset()
def lowerCamelCase_ (UpperCamelCase__ : list[int] , UpperCamelCase__ : int ):
for triplet in permutations(UpperCamelCase__ , 3 ):
if sum(UpperCamelCase__ ) == target:
return tuple(sorted(UpperCamelCase__ ) )
return (0, 0, 0)
def lowerCamelCase_ (UpperCamelCase__ : list[int] , UpperCamelCase__ : int ):
arr.sort()
_UpperCAmelCase : Optional[int] = len(UpperCamelCase__ )
for i in range(n - 1 ):
_UpperCAmelCase , _UpperCAmelCase : Any = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def lowerCamelCase_ ():
_UpperCAmelCase : Union[str, Any] = '''
from __main__ import dataset, triplet_sum1, triplet_sum2
'''
_UpperCAmelCase : Any = '''
triplet_sum1(*dataset)
'''
_UpperCAmelCase : List[Any] = '''
triplet_sum2(*dataset)
'''
_UpperCAmelCase : List[Any] = repeat(setup=UpperCamelCase__ , stmt=UpperCamelCase__ , repeat=5 , number=1_0000 )
_UpperCAmelCase : List[Any] = repeat(setup=UpperCamelCase__ , stmt=UpperCamelCase__ , repeat=5 , number=1_0000 )
return (min(UpperCamelCase__ ), min(UpperCamelCase__ ))
if __name__ == "__main__":
from doctest import testmod
testmod()
_lowerCAmelCase :List[str] = solution_times()
print(f"The time for naive implementation is {times[0]}.")
print(f"The time for optimized implementation is {times[1]}.")
| 68 | 0 |
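To see the two-pointer search of triplet_sum2 in action on a tiny input (reusing the functions defined above):

print(triplet_sum2([1, 2, 3, 4], 9))  # (2, 3, 4): i=1 pairs left=2 with right=3
print(triplet_sum1([1, 2, 3, 4], 9))  # (2, 3, 4): the brute force agrees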